{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L36-L53"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"InputExample.__init__","parameters":"(self, guid, text_a, text_b=None, label=None)","argument_list":"","return_statement":"","docstring":"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.","docstring_summary":"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.","docstring_tokens":["Constructs","a","InputExample",".","Args",":","guid",":","Unique","id","for","the","example",".","text_a",":","string",".","The","untokenized","text","of","the","first","sequence",".","For","single","sequence","tasks","only","this","sequence","must","be","specified",".","text_b",":","(","Optional",")","string",".","The","untokenized","text","of","the","second","sequence",".","Only","must","be","specified","for","sequence","pair","tasks",".","label",":","(","Optional",")","string",".","The","label","of","the","example",".","This","should","be","specified","for","train","and","dev","examples","but","not","for","test","examples","."],"function":"def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label","function_tokens":["def","__init__","(","self",",","guid",",","text_a",",","text_b","=","None",",","label","=","None",")",":","self",".","guid","=","guid","self",".","text_a","=","text_a","self",".","text_b","=","text_b","self",".","label","=","label"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L59-L73"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L90-L92"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise 
NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L94-L96"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"DataProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for prediction.","docstring_summary":"Gets a collection of `InputExample`s for prediction.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","prediction","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L98-L100"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L102-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"DataProcessor._read_tsv","parameters":"(cls, input_file, delimiter=\"\\t\", quotechar=None)","argument_list":"","return_statement":"","docstring":"Reads a tab separated value file.","docstring_summary":"Reads a tab separated value file.","docstring_tokens":["Reads","a","tab","separated","value","file","."],"function":"def _read_tsv(cls, input_file, delimiter=\"\\t\", quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines","function_tokens":["def","_read_tsv","(","cls",",","input_file",",","delimiter","=","\"\\t\"",",","quotechar","=","None",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","f",":","reader","=","csv",".","reader","(","f",",","delimiter","=","delimiter",",","quotechar","=","quotechar",")","lines","=","[","]","for","line","in","reader",":","lines",".","append","(","line",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L107-L114"} 
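The records above catalogue `convert_to_unicode`, the abstract `DataProcessor` getters, and the shared `_read_tsv` helper. A minimal sketch of how a concrete processor would wire them together, assuming the names from the catalogued module are importable; the `ToyTsvProcessor` name, the binary label set, and the label<TAB>sentence column layout are illustrative assumptions, not part of the catalogued file:

import os

class ToyTsvProcessor(DataProcessor):  # hypothetical subclass, for illustration only
    """Wires the abstract getters to the shared _read_tsv helper."""

    def get_train_examples(self, data_dir):
        examples = []
        for i, line in enumerate(self._read_tsv(os.path.join(data_dir, "train.tsv"))):
            guid = "train-%s" % i
            # Assumed column layout: label<TAB>sentence.
            examples.append(InputExample(guid=guid,
                                         text_a=convert_to_unicode(line[1]),
                                         label=convert_to_unicode(line[0])))
        return examples

    def get_labels(self):
        return ["0", "1"]

Keeping labels and text as plain strings mirrors the convention of the CLUE processors catalogued below, which defer tokenization to a later featurization step.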
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"DataProcessor._read_txt","parameters":"(cls, input_file)","argument_list":"","return_statement":"","docstring":"Reads a tab separated value file.","docstring_summary":"Reads a tab separated value file.","docstring_tokens":["Reads","a","tab","separated","value","file","."],"function":"def _read_txt(cls, input_file):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = f.readlines()\n lines = []\n for line in reader:\n lines.append(line.strip().split(\"_!_\"))\n return lines","function_tokens":["def","_read_txt","(","cls",",","input_file",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","f",":","reader","=","f",".","readlines","(",")","lines","=","[","]","for","line","in","reader",":","lines",".","append","(","line",".","strip","(",")",".","split","(","\"_!_\"",")",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L117-L124"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"DataProcessor._read_json","parameters":"(cls, input_file)","argument_list":"","return_statement":"","docstring":"Reads a tab separated value file.","docstring_summary":"Reads a tab separated value file.","docstring_tokens":["Reads","a","tab","separated","value","file","."],"function":"def _read_json(cls, input_file):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = f.readlines()\n lines = []\n for line in reader:\n lines.append(json.loads(line.strip()))\n return lines","function_tokens":["def","_read_json","(","cls",",","input_file",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","f",":","reader","=","f",".","readlines","(",")","lines","=","[","]","for","line","in","reader",":","lines",".","append","(","json",".","loads","(","line",".","strip","(",")",")",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L127-L134"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"XnliProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L140-L143"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"XnliProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L145-L148"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"XnliProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L150-L153"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"XnliProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"See base class.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(line['premise'])\n text_b = convert_to_unicode(line['hypo'])\n label = convert_to_unicode(line['label']) if set_type != 'test' else 'contradiction'\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return 
examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","convert_to_unicode","(","line","[","'premise'","]",")","text_b","=","convert_to_unicode","(","line","[","'hypo'","]",")","label","=","convert_to_unicode","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","'contradiction'","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L155-L165"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"XnliProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"contradiction\", \"entailment\", \"neutral\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"contradiction\"",",","\"entailment\"",",","\"neutral\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L167-L169"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"TnewsProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L217-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"TnewsProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), 
\"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L222-L225"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"TnewsProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L227-L230"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"TnewsProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return labels","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n labels = []\n for i in range(17):\n if i == 5 or i == 11:\n continue\n labels.append(str(100 + i))\n return labels","function_tokens":["def","get_labels","(","self",")",":","labels","=","[","]","for","i","in","range","(","17",")",":","if","i","==","5","or","i","==","11",":","continue","labels",".","append","(","str","(","100","+","i",")",")","return","labels"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L232-L239"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"TnewsProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(line['sentence'])\n text_b = None\n label = convert_to_unicode(line['label']) if set_type != 'test' else \"100\"\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return 
examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","convert_to_unicode","(","line","[","'sentence'","]",")","text_b","=","None","label","=","convert_to_unicode","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","\"100\"","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L241-L251"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"iFLYTEKDataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L297-L300"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"iFLYTEKDataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L302-L305"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"iFLYTEKDataProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), 
\"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L307-L310"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"iFLYTEKDataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return labels","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n labels = []\n for i in range(119):\n labels.append(str(i))\n return labels","function_tokens":["def","get_labels","(","self",")",":","labels","=","[","]","for","i","in","range","(","119",")",":","labels",".","append","(","str","(","i",")",")","return","labels"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L312-L317"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"iFLYTEKDataProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(line['sentence'])\n text_b = None\n label = convert_to_unicode(line['label']) if set_type != 'test' else \"0\"\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","convert_to_unicode","(","line","[","'sentence'","]",")","text_b","=","None","label","=","convert_to_unicode","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","\"0\"","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L319-L329"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"AFQMCProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return 
self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L335-L338"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"AFQMCProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L340-L343"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"AFQMCProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L345-L348"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"AFQMCProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"0\", \"1\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"0\"",",","\"1\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L350-L352"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"AFQMCProcessor._create_examples","parameters":"(self, lines, 
set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(line['sentence1'])\n text_b = convert_to_unicode(line['sentence2'])\n label = convert_to_unicode(line['label']) if set_type != 'test' else '0'\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","convert_to_unicode","(","line","[","'sentence1'","]",")","text_b","=","convert_to_unicode","(","line","[","'sentence2'","]",")","label","=","convert_to_unicode","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","'0'","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L354-L364"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CMNLIProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples_json(os.path.join(data_dir, \"train.json\"), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples_json(os.path.join(data_dir, \"train.json\"), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L370-L372"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CMNLIProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples_json(os.path.join(data_dir, \"dev.json\"), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples_json(os.path.join(data_dir, \"dev.json\"), 
\"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L374-L376"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CMNLIProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples_json(os.path.join(data_dir, \"test.json\"), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples_json(os.path.join(data_dir, \"test.json\"), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L378-L380"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CMNLIProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"contradiction\", \"entailment\", \"neutral\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"contradiction\"",",","\"entailment\"",",","\"neutral\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L382-L384"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CMNLIProcessor._create_examples_json","parameters":"(self, file_name, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples_json(self, file_name, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n lines = tf.gfile.Open(file_name, \"r\")\n index = 0\n for line in lines:\n line_obj = json.loads(line)\n index = index + 1\n guid = \"%s-%s\" % (set_type, index)\n text_a = convert_to_unicode(line_obj[\"sentence1\"])\n text_b = convert_to_unicode(line_obj[\"sentence2\"])\n label = convert_to_unicode(line_obj[\"label\"]) if set_type != 'test' else 'neutral'\n\n if label != \"-\":\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n\n return 
examples","function_tokens":["def","_create_examples_json","(","self",",","file_name",",","set_type",")",":","examples","=","[","]","lines","=","tf",".","gfile",".","Open","(","file_name",",","\"r\"",")","index","=","0","for","line","in","lines",":","line_obj","=","json",".","loads","(","line",")","index","=","index","+","1","guid","=","\"%s-%s\"","%","(","set_type",",","index",")","text_a","=","convert_to_unicode","(","line_obj","[","\"sentence1\"","]",")","text_b","=","convert_to_unicode","(","line_obj","[","\"sentence2\"","]",")","label","=","convert_to_unicode","(","line_obj","[","\"label\"","]",")","if","set_type","!=","'test'","else","'neutral'","if","label","!=","\"-\"",":","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L386-L402"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CslProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L408-L411"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CslProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L413-L416"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CslProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base 
class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L418-L421"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CslProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"0\", \"1\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"0\"",",","\"1\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L423-L425"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"CslProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(\" \".join(line['keyword']))\n text_b = convert_to_unicode(line['abst'])\n label = convert_to_unicode(line['label']) if set_type != 'test' else '0'\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","convert_to_unicode","(","\" \"",".","join","(","line","[","'keyword'","]",")",")","text_b","=","convert_to_unicode","(","line","[","'abst'","]",")","label","=","convert_to_unicode","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","'0'","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L427-L437"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"WSCProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base 
class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L782-L785"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"WSCProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L787-L790"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"WSCProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L792-L795"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"WSCProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"true\", \"false\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"true\", \"false\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"true\"",",","\"false\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L797-L799"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"WSCProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(line['text'])\n text_a_list = list(text_a)\n target = line['target']\n query = target['span1_text']\n query_idx = target['span1_index']\n pronoun = target['span2_text']\n pronoun_idx = target['span2_index']\n\n assert text_a[pronoun_idx: (pronoun_idx + len(pronoun))\n ] == pronoun, \"pronoun: {}\".format(pronoun)\n assert text_a[query_idx: (query_idx + len(query))] == query, \"query: {}\".format(query)\n\n if pronoun_idx > query_idx:\n text_a_list.insert(query_idx, \"_\")\n text_a_list.insert(query_idx + len(query) + 1, \"_\")\n text_a_list.insert(pronoun_idx + 2, \"[\")\n text_a_list.insert(pronoun_idx + len(pronoun) + 2 + 1, \"]\")\n else:\n text_a_list.insert(pronoun_idx, \"[\")\n text_a_list.insert(pronoun_idx + len(pronoun) + 1, \"]\")\n text_a_list.insert(query_idx + 2, \"_\")\n text_a_list.insert(query_idx + len(query) + 2 + 1, \"_\")\n\n text_a = \"\".join(text_a_list)\n\n if set_type == \"test\":\n label = \"true\"\n else:\n label = line['label']\n\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","convert_to_unicode","(","line","[","'text'","]",")","text_a_list","=","list","(","text_a",")","target","=","line","[","'target'","]","query","=","target","[","'span1_text'","]","query_idx","=","target","[","'span1_index'","]","pronoun","=","target","[","'span2_text'","]","pronoun_idx","=","target","[","'span2_index'","]","assert","text_a","[","pronoun_idx",":","(","pronoun_idx","+","len","(","pronoun",")",")","]","==","pronoun",",","\"pronoun: {}\"",".","format","(","pronoun",")","assert","text_a","[","query_idx",":","(","query_idx","+","len","(","query",")",")","]","==","query",",","\"query: 
{}\"",".","format","(","query",")","if","pronoun_idx",">","query_idx",":","text_a_list",".","insert","(","query_idx",",","\"_\"",")","text_a_list",".","insert","(","query_idx","+","len","(","query",")","+","1",",","\"_\"",")","text_a_list",".","insert","(","pronoun_idx","+","2",",","\"[\"",")","text_a_list",".","insert","(","pronoun_idx","+","len","(","pronoun",")","+","2","+","1",",","\"]\"",")","else",":","text_a_list",".","insert","(","pronoun_idx",",","\"[\"",")","text_a_list",".","insert","(","pronoun_idx","+","len","(","pronoun",")","+","1",",","\"]\"",")","text_a_list",".","insert","(","query_idx","+","2",",","\"_\"",")","text_a_list",".","insert","(","query_idx","+","len","(","query",")","+","2","+","1",",","\"_\"",")","text_a","=","\"\"",".","join","(","text_a_list",")","if","set_type","==","\"test\"",":","label","=","\"true\"","else",":","label","=","line","[","'label'","]","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","None",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L801-L838"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"COPAProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L847-L850"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"COPAProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L853-L856"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"COPAProcessor.get_test_examples","parameters":"(self, 
data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L858-L861"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/classifier_utils.py","language":"python","identifier":"COPAProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"0\", \"1\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"0\"",",","\"1\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/classifier_utils.py#L863-L865"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_pretraining.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss, # TODO TODO TODO 
\u53ef\u4ee5\u8ba1\u7b97\u5355\u4e0d\u7b97\u6210\u7ee9\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels)\n # batch_size=masked_lm_log_probs.shape[0]\n # next_sentence_example_loss=tf.zeros((batch_size)) #tf.constant(0.0,dtype=tf.float32)\n # next_sentence_log_probs=tf.zeros((batch_size,2))\n total_loss = masked_lm_loss # TODO remove next sentence loss 2019-08-08, + next_sentence_loss\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n print(\"init_checkpoint:\",init_checkpoint)\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,[-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n # next_sentence_example_loss=0.0 TODO\n # next_sentence_log_probs=0.0 # TODO\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only 
TRAIN and EVAL modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","masked_lm_positions","=","features","[","\"masked_lm_positions\"","]","masked_lm_ids","=","features","[","\"masked_lm_ids\"","]","masked_lm_weights","=","features","[","\"masked_lm_weights\"","]","next_sentence_labels","=","features","[","\"next_sentence_labels\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","(","masked_lm_loss",",","masked_lm_example_loss",",","masked_lm_log_probs",")","=","get_masked_lm_output","(","bert_config",",","model",".","get_sequence_output","(",")",",","model",".","get_embedding_table","(",")",",","masked_lm_positions",",","masked_lm_ids",",","masked_lm_weights",")","(","next_sentence_loss",",","next_sentence_example_loss",",","# TODO TODO TODO \u53ef\u4ee5\u8ba1\u7b97\u5355\u4e0d\u7b97\u6210\u7ee9","next_sentence_log_probs",")","=","get_next_sentence_output","(","bert_config",",","model",".","get_pooled_output","(",")",",","next_sentence_labels",")","# batch_size=masked_lm_log_probs.shape[0]","# next_sentence_example_loss=tf.zeros((batch_size)) #tf.constant(0.0,dtype=tf.float32)","# next_sentence_log_probs=tf.zeros((batch_size,2))","total_loss","=","masked_lm_loss","# TODO remove next sentence loss 2019-08-08, + next_sentence_loss","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","print","(","\"init_checkpoint:\"",",","init_checkpoint",")","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels",")",":","\"\"\"Computes the loss and accuracy of the model.\"\"\"","masked_lm_log_probs","=","tf",".","reshape","(","masked_lm_log_probs",",","[","-","1",",","masked_lm_log_probs",".","shape","[","-","1","]","]",")","masked_lm_predictions","=","tf",".","argmax","(","masked_lm_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","masked_lm_example_loss","=","tf",".","reshape","(","masked_lm_example_loss",",","[","-","1","]",")","masked_lm_ids","=","tf",".","reshape","(","masked_lm_ids",",","[","-","1","]",")","masked_lm_weights","=","tf",".","reshape","(","masked_lm_weights",",","[","-","1","]",")","masked_lm_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","masked_lm_ids",",","predictions","=","masked_lm_predictions",",","weights","=","masked_lm_weights",")","masked_lm_mean_loss","=","tf",".","metrics",".","mean","(","values","=","masked_lm_example_loss",",","weights","=","masked_lm_weights",")","next_sentence_log_probs","=","tf",".","reshape","(","next_sentence_log_probs",",","[","-","1",",","next_sentence_log_probs",".","shape","[","-","1","]","]",")","next_sentence_predictions","=","tf",".","argmax","(","next_sentence_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","next_sentence_labels","=","tf",".","reshape","(","next_sentence_labels",",","[","-","1","]",")","next_sentence_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","next_sentence_labels",",","predictions","=","next_sentence_predictions",")","next_sentence_mean_loss","=","tf",".","metrics",".","mean","(","values","=","next_sentence_example_loss",")","return","{","\"masked_lm_accuracy\"",":","masked_lm_accuracy",",","\"masked_lm_loss\"",":","masked_lm_mean_loss",",","\"next_sentence_accuracy\"",":","next_sentence_accuracy",",","\"next_sentence_loss\"",":","next_sentence_mean_loss",",","}","# next_sentence_example_loss=0.0 TODO","# next_sentence_log_probs=0.0 # TODO","eval_metrics","=","(","metric_fn",",","[","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and EVAL modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_pretraining.py#L109-L240"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_pretraining.py","language":"python","identifier":"get_masked_lm_output","parameters":"(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights)","argument_list":"","return_statement":"return (loss, per_example_loss, log_probs)","docstring":"Get loss and log probs for the masked LM.","docstring_summary":"Get loss and log probs for the masked LM.","docstring_tokens":["Get","loss","and","log","probs","for","the","masked","LM","."],"function":"def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls\/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). 
The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator \/ denominator\n\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_masked_lm_output","(","bert_config",",","input_tensor",",","output_weights",",","positions",",","label_ids",",","label_weights",")",":","input_tensor","=","gather_indexes","(","input_tensor",",","positions",")","with","tf",".","variable_scope","(","\"cls\/predictions\"",")",":","# We apply one more non-linear transformation before the output layer.","# This matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"transform\"",")",":","input_tensor","=","tf",".","layers",".","dense","(","input_tensor",",","units","=","bert_config",".","hidden_size",",","activation","=","modeling",".","get_activation","(","bert_config",".","hidden_act",")",",","kernel_initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","input_tensor","=","modeling",".","layer_norm","(","input_tensor",")","# The output weights are the same as the input embeddings, but there is","# an output-only bias for each token.","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","bert_config",".","vocab_size","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","label_ids","=","tf",".","reshape","(","label_ids",",","[","-","1","]",")","label_weights","=","tf",".","reshape","(","label_weights",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","label_ids",",","depth","=","bert_config",".","vocab_size",",","dtype","=","tf",".","float32",")","# The `positions` tensor might be zero-padded (if the sequence is too","# short to have the maximum number of predictions). 
The `label_weights`","# tensor has a value of 1.0 for every real prediction and 0.0 for the","# padding predictions.","per_example_loss","=","-","tf",".","reduce_sum","(","log_probs","*","one_hot_labels",",","axis","=","[","-","1","]",")","numerator","=","tf",".","reduce_sum","(","label_weights","*","per_example_loss",")","denominator","=","tf",".","reduce_sum","(","label_weights",")","+","1e-5","loss","=","numerator","\/","denominator","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_pretraining.py#L243-L284"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_pretraining.py","language":"python","identifier":"get_next_sentence_output","parameters":"(bert_config, input_tensor, labels)","argument_list":"","return_statement":"","docstring":"Get loss and log probs for the next sentence prediction.","docstring_summary":"Get loss and log probs for the next sentence prediction.","docstring_tokens":["Get","loss","and","log","probs","for","the","next","sentence","prediction","."],"function":"def get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls\/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_next_sentence_output","(","bert_config",",","input_tensor",",","labels",")",":","# Simple binary classification. Note that 0 is \"next sentence\" and 1 is","# \"random sentence\". 
This weight matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"cls\/seq_relationship\"",")",":","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","shape","=","[","2",",","bert_config",".","hidden_size","]",",","initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","labels","=","tf",".","reshape","(","labels",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","2",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_pretraining.py#L287-L307"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_pretraining.py","language":"python","identifier":"gather_indexes","parameters":"(sequence_tensor, positions)","argument_list":"","return_statement":"return output_tensor","docstring":"Gathers the vectors at the specific positions over a minibatch.","docstring_summary":"Gathers the vectors at the specific positions over a minibatch.","docstring_tokens":["Gathers","the","vectors","at","the","specific","positions","over","a","minibatch","."],"function":"def gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return 
output_tensor","function_tokens":["def","gather_indexes","(","sequence_tensor",",","positions",")",":","sequence_shape","=","modeling",".","get_shape_list","(","sequence_tensor",",","expected_rank","=","3",")","batch_size","=","sequence_shape","[","0","]","seq_length","=","sequence_shape","[","1","]","width","=","sequence_shape","[","2","]","flat_offsets","=","tf",".","reshape","(","tf",".","range","(","0",",","batch_size",",","dtype","=","tf",".","int32",")","*","seq_length",",","[","-","1",",","1","]",")","flat_positions","=","tf",".","reshape","(","positions","+","flat_offsets",",","[","-","1","]",")","flat_sequence_tensor","=","tf",".","reshape","(","sequence_tensor",",","[","batch_size","*","seq_length",",","width","]",")","output_tensor","=","tf",".","gather","(","flat_sequence_tensor",",","flat_positions",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_pretraining.py#L310-L323"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_pretraining.py","language":"python","identifier":"input_fn_builder","parameters":"(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_files",",","max_seq_length",",","max_predictions_per_seq",",","is_training",",","num_cpu_threads","=","4",")",":","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"masked_lm_positions\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_ids\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_weights\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","float32",")",",","\"next_sentence_labels\"",":","tf",".","FixedLenFeature","(","[","1","]",",","tf",".","int64",")",",","}","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","if","is_training",":","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","tf",".","constant","(","input_files",")",")","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","len","(","input_files",")",")","# `cycle_length` is the number of parallel files that get read.","cycle_length","=","min","(","num_cpu_threads",",","len","(","input_files",")",")","# `sloppy` mode means that the interleaving is not exact. This adds","# even more randomness to the training pipeline.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","parallel_interleave","(","tf",".","data",".","TFRecordDataset",",","sloppy","=","is_training",",","cycle_length","=","cycle_length",")",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","else",":","d","=","tf",".","data",".","TFRecordDataset","(","input_files",")","# Since we evaluate for a fixed number of steps we don't want to encounter","# out-of-range exceptions.","d","=","d",".","repeat","(",")","# We must `drop_remainder` on training because the TPU requires fixed","# size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU","# and we *don't* want to drop the remainder, otherwise we wont cover","# every sample.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","num_parallel_batches","=","num_cpu_threads",",","drop_remainder","=","True",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_pretraining.py#L326-L390"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_pretraining.py","language":"python","identifier":"_decode_record","parameters":"(record, name_to_features)","argument_list":"","return_statement":"return example","docstring":"Decodes a record to a TensorFlow example.","docstring_summary":"Decodes a record to a TensorFlow example.","docstring_tokens":["Decodes","a","record","to","a","TensorFlow","example","."],"function":"def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example","function_tokens":["def","_decode_record","(","record",",","name_to_features",")",":","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_pretraining.py#L393-L405"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"validate_case_matches_checkpoint","parameters":"(do_lower_case, init_checkpoint)","argument_list":"","return_statement":"","docstring":"Checks whether the casing config is consistent with the checkpoint name.","docstring_summary":"Checks whether the casing config is consistent with the checkpoint name.","docstring_tokens":["Checks","whether","the","casing","config","is","consistent","with","the","checkpoint","name","."],"function":"def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. 
The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-training. If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))","function_tokens":["def","validate_case_matches_checkpoint","(","do_lower_case",",","init_checkpoint",")",":","# The casing has to be passed in by the user and there is no explicit check","# as to whether it matches the checkpoint. The casing information probably","# should have been stored in the bert_config.json file, but it's not, so","# we have to heuristically detect it to validate.","if","not","init_checkpoint",":","return","m","=","re",".","match","(","\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\"",",","init_checkpoint",")","if","m","is","None",":","return","model_name","=","m",".","group","(","1",")","lower_models","=","[","\"uncased_L-24_H-1024_A-16\"",",","\"uncased_L-12_H-768_A-12\"",",","\"multilingual_L-12_H-768_A-12\"",",","\"chinese_L-12_H-768_A-12\"","]","cased_models","=","[","\"cased_L-12_H-768_A-12\"",",","\"cased_L-24_H-1024_A-16\"",",","\"multi_cased_L-12_H-768_A-12\"","]","is_bad_config","=","False","if","model_name","in","lower_models","and","not","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"False\"","case_name","=","\"lowercased\"","opposite_flag","=","\"True\"","if","model_name","in","cased_models","and","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"True\"","case_name","=","\"cased\"","opposite_flag","=","\"False\"","if","is_bad_config",":","raise","ValueError","(","\"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"","\"However, `%s` seems to be a %s model, so you \"","\"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"","\"how the model was pre-training. 
If this error is wrong, please \"","\"just comment out this check.\"","%","(","actual_flag",",","init_checkpoint",",","model_name",",","case_name",",","opposite_flag",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L28-L75"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L78-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n 
else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L98-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","index","=","0","with","tf",".","gfile",".","GFile","(","vocab_file",",","\"r\"",")","as","reader",":","while","True",":","token","=","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","token",":","break","token","=","token",".","strip","(",")","vocab","[","token","]","=","index","index","+=","1","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L121-L133"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"convert_by_vocab","parameters":"(vocab, items)","argument_list":"","return_statement":"return output","docstring":"Converts a sequence of [tokens|ids] using the vocab.","docstring_summary":"Converts a sequence of [tokens|ids] using the vocab.","docstring_tokens":["Converts","a","sequence","of","[","tokens|ids","]","using","the","vocab","."],"function":"def convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n #print(\"items:\",items) #['[CLS]', '\u65e5', '##\u671f', '\uff0c', '\u4f46', '\u88ab', '##\u544a', '\u91d1', '##\u4e1c', '##\u798f', '\u8f7d', '##\u660e', '[MASK]', 'U', '##N', '##K', ']', '\u4fdd', '##\u8bc1', '\u672c', '##\u6708', '1', '##4', '[MASK]', '\u5230', '##\u4f4d', '\uff0c', '2', 
'##0', '##1', '##5', '\u5e74', '6', '[MASK]', '1', '##1', '\u65e5', '[', 'U', '##N', '##K', ']', '\uff0c', '\u539f', '##\u544a', '[MASK]', '\u8ba4', '##\u53ef', '\u4e8e', '2', '##0', '##1', '##5', '[MASK]', '6', '\u6708', '[MASK]', '[MASK]', '\u65e5', '##\u5411', '\u88ab', '##\u544a', '\u4e3b', '##\u5f20', '\u6743', '##\u5229', '\u3002', '\u800c', '[MASK]', '[MASK]', '\u81ea', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '\u5e74', '6', '\u6708', '1', '##1', '\u65e5', '[SEP]', '\u539f', '##\u544a', '\u4e8e', '2', '##0', '##1', '##6', '[MASK]', '6', '[MASK]', '2', '##4', '\u65e5', '\u8d77', '##\u8bc9', '\uff0c', '\u4e3b', '##\u5f20', '\u4fdd', '##\u8bc1', '\u8d23', '##\u4efb', '\uff0c', '\u5df2', '\u8d85', '##\u8fc7', '\u4fdd', '##\u8bc1', '\u671f', '##\u9650', '[MASK]', '\u4fdd', '##\u8bc1', '\u4eba', '\u4f9d', '##\u6cd5', '\u4e0d', '##\u518d', '\u627f', '##\u62c5', '\u4fdd', '##\u8bc1', '[MASK]', '[MASK]', '[MASK]', '[SEP]']\n for i,item in enumerate(items):\n #print(i,\"item:\",item) # ##\u671f\n output.append(vocab[item])\n return output","function_tokens":["def","convert_by_vocab","(","vocab",",","items",")",":","output","=","[","]","#print(\"items:\",items) #['[CLS]', '\u65e5', '##\u671f', '\uff0c', '\u4f46', '\u88ab', '##\u544a', '\u91d1', '##\u4e1c', '##\u798f', '\u8f7d', '##\u660e', '[MASK]', 'U', '##N', '##K', ']', '\u4fdd', '##\u8bc1', '\u672c', '##\u6708', '1', '##4', '[MASK]', '\u5230', '##\u4f4d', '\uff0c', '2', '##0', '##1', '##5', '\u5e74', '6', '[MASK]', '1', '##1', '\u65e5', '[', 'U', '##N', '##K', ']', '\uff0c', '\u539f', '##\u544a', '[MASK]', '\u8ba4', '##\u53ef', '\u4e8e', '2', '##0', '##1', '##5', '[MASK]', '6', '\u6708', '[MASK]', '[MASK]', '\u65e5', '##\u5411', '\u88ab', '##\u544a', '\u4e3b', '##\u5f20', '\u6743', '##\u5229', '\u3002', '\u800c', '[MASK]', '[MASK]', '\u81ea', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '\u5e74', '6', '\u6708', '1', '##1', '\u65e5', '[SEP]', '\u539f', '##\u544a', '\u4e8e', '2', '##0', '##1', '##6', '[MASK]', '6', '[MASK]', '2', '##4', '\u65e5', '\u8d77', '##\u8bc9', '\uff0c', '\u4e3b', '##\u5f20', '\u4fdd', '##\u8bc1', '\u8d23', '##\u4efb', '\uff0c', '\u5df2', '\u8d85', '##\u8fc7', '\u4fdd', '##\u8bc1', '\u671f', '##\u9650', '[MASK]', '\u4fdd', '##\u8bc1', '\u4eba', '\u4f9d', '##\u6cd5', '\u4e0d', '##\u518d', '\u627f', '##\u62c5', '\u4fdd', '##\u8bc1', '[MASK]', '[MASK]', '[MASK]', '[SEP]']","for","i",",","item","in","enumerate","(","items",")",":","#print(i,\"item:\",item) # ##\u671f","output",".","append","(","vocab","[","item","]",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L136-L143"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","piece","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return 
tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L154-L160"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically contorl characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L364-L373"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat","in","(","\"Cc\"",",","\"Cf\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L376-L385"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation 
character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L388-L401"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",")",":","self",".","do_lower_case","=","do_lower_case"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L190-L196"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text.","docstring_summary":"Tokenizes a piece of text.","docstring_tokens":["Tokenizes","a","piece","of","text","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. 
This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L198-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L222-L231"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text)","argument_list":"","return_statement":"return [\"\".join(x) for x in 
output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",")",":","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L233-L251"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L253-L264"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # 
https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like the all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L266-L286"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L288-L299"} 
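The BasicTokenizer records above (tokenize, _run_strip_accents, _run_split_on_punc, _tokenize_chinese_chars, _is_chinese_char, _clean_text) compose into a single pre-tokenization pass: clean the text, pad CJK characters with spaces, whitespace-split, lowercase and strip accents, then split off punctuation. A minimal stdlib-only sketch of that pipeline, assuming the same NFD accent handling and CJK code-point ranges; basic_tokenize_sketch is an illustrative name, and Unicode category "P" stands in for the repo's _is_punctuation helper:

import unicodedata

def _is_cjk(cp):
    # The same CJK Unified Ideographs ranges checked in _is_chinese_char above.
    return (0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF) or \
           (0x20000 <= cp <= 0x2A6DF) or (0x2A700 <= cp <= 0x2B73F) or \
           (0x2B740 <= cp <= 0x2B81F) or (0x2B820 <= cp <= 0x2CEAF) or \
           (0xF900 <= cp <= 0xFAFF) or (0x2F800 <= cp <= 0x2FA1F)

def strip_accents(token):
    # NFD-decompose, then drop combining marks (category "Mn"), as in _run_strip_accents.
    return "".join(c for c in unicodedata.normalize("NFD", token)
                   if unicodedata.category(c) != "Mn")

def split_punct(token):
    # Emit each punctuation character as its own token; keep other runs intact.
    out, buf = [], ""
    for c in token:
        if unicodedata.category(c).startswith("P"):
            if buf:
                out.append(buf)
                buf = ""
            out.append(c)
        else:
            buf += c
    if buf:
        out.append(buf)
    return out

def basic_tokenize_sketch(text, do_lower_case=True):
    # Surround every CJK character with spaces so it becomes its own token.
    text = "".join(" %s " % c if _is_cjk(ord(c)) else c for c in text)
    tokens = []
    for tok in text.split():
        if do_lower_case:
            tok = strip_accents(tok.lower())
        tokens.extend(split_punct(tok))
    return tokens

print(basic_tokenize_sketch("Héllo, 世界!"))  # ['hello', ',', '世', '界', '!']

Per the records above, the real pipeline also runs _clean_text first (dropping NUL, U+FFFD, and control characters) and hands the result to WordpieceTokenizer for greedy longest-match-first subword splitting.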
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tokenization.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tokenization.py#L310-L361"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tf_metrics.py","language":"python","identifier":"precision","parameters":"(labels, predictions, num_classes, pos_indices=None,\n 
weights=None, average='micro')","argument_list":"","return_statement":"return (pr, op)","docstring":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","precision","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def precision(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro'):\n \"\"\"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n pr, _, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n op, _, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (pr, op)","function_tokens":["def","precision","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","pr",",","_",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","op",",","_",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","pr",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tf_metrics.py#L15-L50"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tf_metrics.py","language":"python","identifier":"recall","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro')","argument_list":"","return_statement":"return (re, op)","docstring":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","recall","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def recall(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro'):\n \"\"\"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, re, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n _, op, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (re, op)","function_tokens":["def","recall","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","re",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","_",",","op",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","re",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tf_metrics.py#L53-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tf_metrics.py","language":"python","identifier":"fbeta","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1)","argument_list":"","return_statement":"return (fbeta, op)","docstring":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","fbeta","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1):\n \"\"\"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, _, fbeta = metrics_from_confusion_matrix(\n cm, pos_indices, average=average, beta=beta)\n _, _, op = metrics_from_confusion_matrix(\n op, pos_indices, average=average, beta=beta)\n return (fbeta, op)","function_tokens":["def","fbeta","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",",","beta","=","1",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","_",",","fbeta","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",",","beta","=","beta",")","_",",","_",",","op","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",",","beta","=","beta",")","return","(","fbeta",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tf_metrics.py#L97-L134"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tf_metrics.py","language":"python","identifier":"safe_div","parameters":"(numerator, denominator)","argument_list":"","return_statement":"return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","docstring":"Safe division, return 0 if denominator is 0","docstring_summary":"Safe division, return 0 if denominator is 0","docstring_tokens":["Safe","division","return","0","if","denominator","is","0"],"function":"def safe_div(numerator, denominator):\n \"\"\"Safe division, return 0 if denominator is 0\"\"\"\n numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)\n zeros = tf.zeros_like(numerator, dtype=numerator.dtype)\n denominator_is_zero = tf.equal(denominator, zeros)\n return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","function_tokens":["def","safe_div","(","numerator",",","denominator",")",":","numerator",",","denominator","=","tf",".","to_float","(","numerator",")",",","tf",".","to_float","(","denominator",")","zeros","=","tf",".","zeros_like","(","numerator",",","dtype","=","numerator",".","dtype",")","denominator_is_zero","=","tf",".","equal","(","denominator",",","zeros",")","return","tf",".","where","(","denominator_is_zero",",","zeros",",","numerator","\/","denominator",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tf_metrics.py#L137-L142"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tf_metrics.py","language":"python","identifier":"pr_re_fbeta","parameters":"(cm, pos_indices, beta=1)","argument_list":"","return_statement":"return pr, re, fbeta","docstring":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_summary":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_tokens":["Uses","a","confusion","matrix","to","compute","precision","recall","and","fbeta"],"function":"def pr_re_fbeta(cm, pos_indices, beta=1):\n \"\"\"Uses a confusion matrix 
to compute precision, recall and fbeta\"\"\"\n num_classes = cm.shape[0]\n neg_indices = [i for i in range(num_classes) if i not in pos_indices]\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, neg_indices] = 0\n diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask))\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[:, neg_indices] = 0\n tot_pred = tf.reduce_sum(cm * cm_mask)\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, :] = 0\n tot_gold = tf.reduce_sum(cm * cm_mask)\n\n pr = safe_div(diag_sum, tot_pred)\n re = safe_div(diag_sum, tot_gold)\n fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re)\n\n return pr, re, fbeta","function_tokens":["def","pr_re_fbeta","(","cm",",","pos_indices",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","neg_indices","=","[","i","for","i","in","range","(","num_classes",")","if","i","not","in","pos_indices","]","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",","neg_indices","]","=","0","diag_sum","=","tf",".","reduce_sum","(","tf",".","diag_part","(","cm","*","cm_mask",")",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[",":",",","neg_indices","]","=","0","tot_pred","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",",":","]","=","0","tot_gold","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","pr","=","safe_div","(","diag_sum",",","tot_pred",")","re","=","safe_div","(","diag_sum",",","tot_gold",")","fbeta","=","safe_div","(","(","1.","+","beta","**","2",")","*","pr","*","re",",","beta","**","2","*","pr","+","re",")","return","pr",",","re",",","fbeta"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tf_metrics.py#L145-L165"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/tf_metrics.py","language":"python","identifier":"metrics_from_confusion_matrix","parameters":"(cm, pos_indices=None, average='micro',\n beta=1)","argument_list":"","return_statement":"","docstring":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'","docstring_summary":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 
'weighted'","docstring_tokens":["Precision","Recall","and","F1","from","the","confusion","matrix","Parameters","----------","cm",":","tf",".","Tensor","of","type","tf",".","int32","of","shape","(","num_classes","num_classes",")","The","streaming","confusion","matrix",".","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","average",":","str","optional","micro","macro","or","weighted"],"function":"def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro',\n beta=1):\n \"\"\"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'\n \"\"\"\n num_classes = cm.shape[0]\n if pos_indices is None:\n pos_indices = [i for i in range(num_classes)]\n\n if average == 'micro':\n return pr_re_fbeta(cm, pos_indices, beta)\n elif average in {'macro', 'weighted'}:\n precisions, recalls, fbetas, n_golds = [], [], [], []\n for idx in pos_indices:\n pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)\n precisions.append(pr)\n recalls.append(re)\n fbetas.append(fbeta)\n cm_mask = np.zeros([num_classes, num_classes])\n cm_mask[idx, :] = 1\n n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))\n\n if average == 'macro':\n pr = tf.reduce_mean(precisions)\n re = tf.reduce_mean(recalls)\n fbeta = tf.reduce_mean(fbetas)\n return pr, re, fbeta\n if average == 'weighted':\n n_gold = tf.reduce_sum(n_golds)\n pr_sum = sum(p * n for p, n in zip(precisions, n_golds))\n pr = safe_div(pr_sum, n_gold)\n re_sum = sum(r * n for r, n in zip(recalls, n_golds))\n re = safe_div(re_sum, n_gold)\n fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))\n fbeta = safe_div(fbeta_sum, n_gold)\n return pr, re, fbeta\n\n else:\n raise 
NotImplementedError()","function_tokens":["def","metrics_from_confusion_matrix","(","cm",",","pos_indices","=","None",",","average","=","'micro'",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","if","pos_indices","is","None",":","pos_indices","=","[","i","for","i","in","range","(","num_classes",")","]","if","average","==","'micro'",":","return","pr_re_fbeta","(","cm",",","pos_indices",",","beta",")","elif","average","in","{","'macro'",",","'weighted'","}",":","precisions",",","recalls",",","fbetas",",","n_golds","=","[","]",",","[","]",",","[","]",",","[","]","for","idx","in","pos_indices",":","pr",",","re",",","fbeta","=","pr_re_fbeta","(","cm",",","[","idx","]",",","beta",")","precisions",".","append","(","pr",")","recalls",".","append","(","re",")","fbetas",".","append","(","fbeta",")","cm_mask","=","np",".","zeros","(","[","num_classes",",","num_classes","]",")","cm_mask","[","idx",",",":","]","=","1","n_golds",".","append","(","tf",".","to_float","(","tf",".","reduce_sum","(","cm","*","cm_mask",")",")",")","if","average","==","'macro'",":","pr","=","tf",".","reduce_mean","(","precisions",")","re","=","tf",".","reduce_mean","(","recalls",")","fbeta","=","tf",".","reduce_mean","(","fbetas",")","return","pr",",","re",",","fbeta","if","average","==","'weighted'",":","n_gold","=","tf",".","reduce_sum","(","n_golds",")","pr_sum","=","sum","(","p","*","n","for","p",",","n","in","zip","(","precisions",",","n_golds",")",")","pr","=","safe_div","(","pr_sum",",","n_gold",")","re_sum","=","sum","(","r","*","n","for","r",",","n","in","zip","(","recalls",",","n_golds",")",")","re","=","safe_div","(","re_sum",",","n_gold",")","fbeta_sum","=","sum","(","f","*","n","for","f",",","n","in","zip","(","fbetas",",","n_golds",")",")","fbeta","=","safe_div","(","fbeta_sum",",","n_gold",")","return","pr",",","re",",","fbeta","else",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/tf_metrics.py#L168-L215"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * cdf","docstring":"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.","docstring_summary":"Gaussian Error Linear Unit.","docstring_tokens":["Gaussian","Error","Linear","Unit","."],"function":"def gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 \/ np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","tf",".","tanh","(","(","np",".","sqrt","(","2","\/","np",".","pi",")","*","(","x","+","0.044715","*","tf",".","pow","(","x",",","3",")",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L264-L277"} 
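The tf_metrics records above all bottom out in pr_re_fbeta, which reads precision, recall, and F-beta off a streaming confusion matrix by masking out the negative classes. A NumPy sketch of the same masking arithmetic, with gold labels on rows and predictions on columns as in tf.confusion_matrix; pr_re_fbeta_np and the toy matrix are illustrative, not part of the repo:

import numpy as np

def safe_div(num, den):
    # Mirror of the record's safe_div: return 0 when the denominator is 0.
    return num / den if den else 0.0

def pr_re_fbeta_np(cm, pos_indices, beta=1.0):
    n = cm.shape[0]
    neg = [i for i in range(n) if i not in pos_indices]

    mask = np.ones((n, n)); mask[neg, neg] = 0   # keep only positive-class diagonal cells
    tp = np.trace(cm * mask)                     # true positives

    mask = np.ones((n, n)); mask[:, neg] = 0     # columns = predicted labels
    tot_pred = (cm * mask).sum()                 # everything predicted as a positive class

    mask = np.ones((n, n)); mask[neg, :] = 0     # rows = gold labels
    tot_gold = (cm * mask).sum()                 # everything whose gold label is positive

    pr = safe_div(tp, tot_pred)
    re = safe_div(tp, tot_gold)
    fb = safe_div((1.0 + beta**2) * pr * re, beta**2 * pr + re)
    return pr, re, fb

cm = np.array([[5., 1., 0.],
               [2., 3., 1.],
               [0., 1., 4.]])
print(pr_re_fbeta_np(cm, pos_indices=[1, 2]))  # micro P/R/F1 over classes 1 and 2

This is why 'micro' averaging falls out of a single call, while metrics_from_confusion_matrix implements 'macro' and 'weighted' by invoking the same routine once per class in pos_indices and averaging the results, either unweighted or weighted by the per-class gold counts.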
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"get_activation","parameters":"(activation_string)","argument_list":"","return_statement":"","docstring":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.","docstring_summary":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.","docstring_tokens":["Maps","a","string","to","a","Python","function","e",".","g",".","relu","=",">","tf",".","nn",".","relu","."],"function":"def get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)","function_tokens":["def","get_activation","(","activation_string",")",":","# We assume that anything that\"s not a string is already an activation","# function, so we just return it.","if","not","isinstance","(","activation_string",",","six",".","string_types",")",":","return","activation_string","if","not","activation_string",":","return","None","act","=","activation_string",".","lower","(",")","if","act","==","\"linear\"",":","return","None","elif","act","==","\"relu\"",":","return","tf",".","nn",".","relu","elif","act","==","\"gelu\"",":","return","gelu","elif","act","==","\"tanh\"",":","return","tf",".","tanh","else",":","raise","ValueError","(","\"Unsupported activation: %s\"","%","act",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L280-L314"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"get_assignment_map_from_checkpoint","parameters":"(tvars, init_checkpoint)","argument_list":"","return_statement":"return (assignment_map, initialized_variable_names)","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute 
the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)","function_tokens":["def","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","assignment_map","=","{","}","initialized_variable_names","=","{","}","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","if","name","not","in","name_to_variable",":","continue","assignment_map","[","name","]","=","name","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","return","(","assignment_map",",","initialized_variable_names",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L317-L341"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"dropout","parameters":"(input_tensor, dropout_prob)","argument_list":"","return_statement":"return output","docstring":"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.","docstring_summary":"Perform dropout.","docstring_tokens":["Perform","dropout","."],"function":"def dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\n return output","function_tokens":["def","dropout","(","input_tensor",",","dropout_prob",")",":","if","dropout_prob","is","None","or","dropout_prob","==","0.0",":","return","input_tensor","output","=","tf",".","nn",".","dropout","(","input_tensor",",","1.0","-","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L344-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"layer_norm","parameters":"(input_tensor, name=None)","argument_list":"","return_statement":"return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","docstring":"Run layer normalization on the last dimension of the tensor.","docstring_summary":"Run layer normalization on the last dimension of the tensor.","docstring_tokens":["Run","layer","normalization","on","the","last","dimension","of","the","tensor","."],"function":"def layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","function_tokens":["def","layer_norm","(","input_tensor",",","name","=","None",")",":","return","tf",".","contrib",".","layers",".","layer_norm","(","inputs","=","input_tensor",",","begin_norm_axis","=","-","1",",","begin_params_axis","=","-","1",",","scope","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L362-L365"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"layer_norm_and_dropout","parameters":"(input_tensor, dropout_prob, name=None)","argument_list":"","return_statement":"return output_tensor","docstring":"Runs layer normalization followed by dropout.","docstring_summary":"Runs layer normalization followed by dropout.","docstring_tokens":["Runs","layer","normalization","followed","by","dropout","."],"function":"def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor","function_tokens":["def","layer_norm_and_dropout","(","input_tensor",",","dropout_prob",",","name","=","None",")",":","output_tensor","=","layer_norm","(","input_tensor",",","name",")","output_tensor","=","dropout","(","output_tensor",",","dropout_prob",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L368-L372"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"create_initializer","parameters":"(initializer_range=0.02)","argument_list":"","return_statement":"return 
tf.truncated_normal_initializer(stddev=initializer_range)","docstring":"Creates a `truncated_normal_initializer` with the given range.","docstring_summary":"Creates a `truncated_normal_initializer` with the given range.","docstring_tokens":["Creates","a","truncated_normal_initializer","with","the","given","range","."],"function":"def create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)","function_tokens":["def","create_initializer","(","initializer_range","=","0.02",")",":","return","tf",".","truncated_normal_initializer","(","stddev","=","initializer_range",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L375-L377"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"embedding_lookup","parameters":"(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False)","argument_list":"","return_statement":"return (output, embedding_table)","docstring":"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].","docstring_summary":"Looks up words embeddings for id tensor.","docstring_tokens":["Looks","up","words","embeddings","for","id","tensor","."],"function":"def embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. 
If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.get_variable(\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n output = tf.matmul(one_hot_input_ids, embedding_table)\n else:\n output = tf.gather(embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * embedding_size])\n return (output, embedding_table)","function_tokens":["def","embedding_lookup","(","input_ids",",","vocab_size",",","embedding_size","=","128",",","initializer_range","=","0.02",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","False",")",":","# This function assumes that the input is of shape [batch_size, seq_length,","# num_inputs].","#","# If the input is a 2D tensor of shape [batch_size, seq_length], we","# reshape to [batch_size, seq_length, 1].","if","input_ids",".","shape",".","ndims","==","2",":","input_ids","=","tf",".","expand_dims","(","input_ids",",","axis","=","[","-","1","]",")","embedding_table","=","tf",".","get_variable","(","name","=","word_embedding_name",",","shape","=","[","vocab_size",",","embedding_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","flat_input_ids","=","tf",".","reshape","(","input_ids",",","[","-","1","]",")","if","use_one_hot_embeddings",":","one_hot_input_ids","=","tf",".","one_hot","(","flat_input_ids",",","depth","=","vocab_size",")","output","=","tf",".","matmul","(","one_hot_input_ids",",","embedding_table",")","else",":","output","=","tf",".","gather","(","embedding_table",",","flat_input_ids",")","input_shape","=","get_shape_list","(","input_ids",")","output","=","tf",".","reshape","(","output",",","input_shape","[","0",":","-","1","]","+","[","input_shape","[","-","1","]","*","embedding_size","]",")","return","(","output",",","embedding_table",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L380-L425"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"embedding_postprocessor","parameters":"(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1)","argument_list":"","return_statement":"return output","docstring":"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. 
Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.","docstring_summary":"Performs various post-processing on a word embedding tensor.","docstring_tokens":["Performs","various","post","-","processing","on","a","word","embedding","tensor","."],"function":"def embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n\n output = layer_norm_and_dropout(output, dropout_prob)\n return output","function_tokens":["def","embedding_postprocessor","(","input_tensor",",","use_token_type","=","False",",","token_type_ids","=","None",",","token_type_vocab_size","=","16",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","0.02",",","max_position_embeddings","=","512",",","dropout_prob","=","0.1",")",":","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","width","=","input_shape","[","2","]","output","=","input_tensor","if","use_token_type",":","if","token_type_ids","is","None",":","raise","ValueError","(","\"`token_type_ids` must be specified if\"","\"`use_token_type` is True.\"",")","token_type_table","=","tf",".","get_variable","(","name","=","token_type_embedding_name",",","shape","=","[","token_type_vocab_size",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# This vocab will be small so we always do one-hot here, since it is always","# faster for a small vocabulary.","flat_token_type_ids","=","tf",".","reshape","(","token_type_ids",",","[","-","1","]",")","one_hot_ids","=","tf",".","one_hot","(","flat_token_type_ids",",","depth","=","token_type_vocab_size",")","token_type_embeddings","=","tf",".","matmul","(","one_hot_ids",",","token_type_table",")","token_type_embeddings","=","tf",".","reshape","(","token_type_embeddings",",","[","batch_size",",","seq_length",",","width","]",")","output","+=","token_type_embeddings","if","use_position_embeddings",":","assert_op","=","tf",".","assert_less_equal","(","seq_length",",","max_position_embeddings",")","with","tf",".","control_dependencies","(","[","assert_op","]",")",":","full_position_embeddings","=","tf",".","get_variable","(","name","=","position_embedding_name",",","shape","=","[","max_position_embeddings",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# Since the position embedding table is a learned variable, we create it","# using a (long) sequence length `max_position_embeddings`. The actual","# sequence length might be shorter than this, for faster training of","# tasks that do not have long sequences.","#","# So `full_position_embeddings` is effectively an embedding table","# for position [0, 1, 2, ..., max_position_embeddings-1], and the current","# sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just","# perform a slice.","position_embeddings","=","tf",".","slice","(","full_position_embeddings",",","[","0",",","0","]",",","[","seq_length",",","-","1","]",")","num_dims","=","len","(","output",".","shape",".","as_list","(",")",")","# Only the last two dimensions are relevant (`seq_length` and `width`), so","# we broadcast among the first dimensions, which is typically just","# the batch size.","position_broadcast_shape","=","[","]","for","_","in","range","(","num_dims","-","2",")",":","position_broadcast_shape",".","append","(","1",")","position_broadcast_shape",".","extend","(","[","seq_length",",","width","]",")","position_embeddings","=","tf",".","reshape","(","position_embeddings",",","position_broadcast_shape",")","output","+=","position_embeddings","output","=","layer_norm_and_dropout","(","output",",","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L428-L521"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"create_attention_mask_from_input_mask","parameters":"(from_tensor, to_mask)","argument_list":"","return_statement":"return mask","docstring":"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].","docstring_summary":"Create 3D attention mask from a 2D tensor mask.","docstring_tokens":["Create","3D","attention","mask","from","a","2D","tensor","mask","."],"function":"def create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask","function_tokens":["def","create_attention_mask_from_input_mask","(","from_tensor",",","to_mask",")",":","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_shape","=","get_shape_list","(","to_mask",",","expected_rank","=","2",")","to_seq_length","=","to_shape","[","1","]","to_mask","=","tf",".","cast","(","tf",".","reshape","(","to_mask",",","[","batch_size",",","1",",","to_seq_length","]",")",",","tf",".","float32",")","# We don't assume that `from_tensor` is a mask (although it could be). We","# don't actually care if we attend *from* padding tokens (only *to* padding)","# tokens so we create a tensor of all ones.","#","# `broadcast_ones` = [batch_size, from_seq_length, 1]","broadcast_ones","=","tf",".","ones","(","shape","=","[","batch_size",",","from_seq_length",",","1","]",",","dtype","=","tf",".","float32",")","# Here we broadcast along two dimensions to create the mask.","mask","=","broadcast_ones","*","to_mask","return","mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L524-L555"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"attention_layer","parameters":"(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None)","argument_list":"","return_statement":"return context_layer","docstring":"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is All You Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-width vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention is done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.","docstring_summary":"Performs multi-headed attention from `from_tensor` to `to_tensor`.","docstring_tokens":["Performs","multi","-","headed","attention","from","from_tensor","to","to_tensor","."],"function":"def attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is All You Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-width vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention is done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = 
transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 \/ math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer","function_tokens":["def","attention_layer","(","from_tensor",",","to_tensor",",","attention_mask","=","None",",","num_attention_heads","=","1",",","size_per_head","=","512",",","query_act","=","None",",","key_act","=","None",",","value_act","=","None",",","attention_probs_dropout_prob","=","0.0",",","initializer_range","=","0.02",",","do_return_2d_tensor","=","False",",","batch_size","=","None",",","from_seq_length","=","None",",","to_seq_length","=","None",")",":","def","transpose_for_scores","(","input_tensor",",","batch_size",",","num_attention_heads",",","seq_length",",","width",")",":","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","batch_size",",","seq_length",",","num_attention_heads",",","width","]",")","output_tensor","=","tf",".","transpose","(","output_tensor",",","[","0",",","2",",","1",",","3","]",")","return","output_tensor","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","to_shape","=","get_shape_list","(","to_tensor",",","expected_rank","=","[","2",",","3","]",")","if","len","(","from_shape",")","!=","len","(","to_shape",")",":","raise","ValueError","(","\"The rank of `from_tensor` must match the rank of 
`to_tensor`.\"",")","if","len","(","from_shape",")","==","3",":","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_seq_length","=","to_shape","[","1","]","elif","len","(","from_shape",")","==","2",":","if","(","batch_size","is","None","or","from_seq_length","is","None","or","to_seq_length","is","None",")",":","raise","ValueError","(","\"When passing in rank 2 tensors to attention_layer, the values \"","\"for `batch_size`, `from_seq_length`, and `to_seq_length` \"","\"must all be specified.\"",")","# Scalar dimensions referenced here:","# B = batch size (number of sequences)","# F = `from_tensor` sequence length","# T = `to_tensor` sequence length","# N = `num_attention_heads`","# H = `size_per_head`","from_tensor_2d","=","reshape_to_matrix","(","from_tensor",")","to_tensor_2d","=","reshape_to_matrix","(","to_tensor",")","# `query_layer` = [B*F, N*H]","query_layer","=","tf",".","layers",".","dense","(","from_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","query_act",",","name","=","\"query\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `key_layer` = [B*T, N*H]","key_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","key_act",",","name","=","\"key\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `value_layer` = [B*T, N*H]","value_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","value_act",",","name","=","\"value\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `query_layer` = [B, N, F, H]","query_layer","=","transpose_for_scores","(","query_layer",",","batch_size",",","num_attention_heads",",","from_seq_length",",","size_per_head",")","# `key_layer` = [B, N, T, H]","key_layer","=","transpose_for_scores","(","key_layer",",","batch_size",",","num_attention_heads",",","to_seq_length",",","size_per_head",")","# Take the dot product between \"query\" and \"key\" to get the raw","# attention scores.","# `attention_scores` = [B, N, F, T]","attention_scores","=","tf",".","matmul","(","query_layer",",","key_layer",",","transpose_b","=","True",")","attention_scores","=","tf",".","multiply","(","attention_scores",",","1.0","\/","math",".","sqrt","(","float","(","size_per_head",")",")",")","if","attention_mask","is","not","None",":","# `attention_mask` = [B, 1, F, T]","attention_mask","=","tf",".","expand_dims","(","attention_mask",",","axis","=","[","1","]",")","# Since attention_mask is 1.0 for positions we want to attend and 0.0 for","# masked positions, this operation will create a tensor which is 0.0 for","# positions we want to attend and -10000.0 for masked positions.","adder","=","(","1.0","-","tf",".","cast","(","attention_mask",",","tf",".","float32",")",")","*","-","10000.0","# Since we are adding it to the raw scores before the softmax, this is","# effectively the same as removing these entirely.","attention_scores","+=","adder","# Normalize the attention scores to probabilities.","# `attention_probs` = [B, N, F, T]","attention_probs","=","tf",".","nn",".","softmax","(","attention_scores",")","# This is actually dropping out entire tokens to attend to, which might","# seem a bit unusual, but is taken from the original Transformer paper.","attention_probs","=","dropout","(","attention_probs",",","attention_probs_dropout_prob",")","# `value_layer` = 
[B, T, N, H]","value_layer","=","tf",".","reshape","(","value_layer",",","[","batch_size",",","to_seq_length",",","num_attention_heads",",","size_per_head","]",")","# `value_layer` = [B, N, T, H]","value_layer","=","tf",".","transpose","(","value_layer",",","[","0",",","2",",","1",",","3","]",")","# `context_layer` = [B, N, F, H]","context_layer","=","tf",".","matmul","(","attention_probs",",","value_layer",")","# `context_layer` = [B, F, N, H]","context_layer","=","tf",".","transpose","(","context_layer",",","[","0",",","2",",","1",",","3","]",")","if","do_return_2d_tensor",":","# `context_layer` = [B*F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size","*","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","else",":","# `context_layer` = [B, F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size",",","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","return","context_layer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L558-L751"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"transformer_model","parameters":"(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False)","argument_list":"","return_statement":"","docstring":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. 
Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.","docstring_summary":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".","docstring_tokens":["Multi","-","headed","multi","-","layer","Transformer","from","Attention","is","All","You","Need","."],"function":"def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size \/ num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on\n # the GPU\/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output","function_tokens":["def","transformer_model","(","input_tensor",",","attention_mask","=","None",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","intermediate_act_fn","=","gelu",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","initializer_range","=","0.02",",","do_return_all_layers","=","False",")",":","if","hidden_size","%","num_attention_heads","!=","0",":","raise","ValueError","(","\"The hidden size (%d) is not a multiple of the number of attention \"","\"heads 
(%d)\"","%","(","hidden_size",",","num_attention_heads",")",")","attention_head_size","=","int","(","hidden_size","\/","num_attention_heads",")","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","input_width","=","input_shape","[","2","]","# The Transformer performs sum residuals on all layers so the input needs","# to be the same as the hidden size.","if","input_width","!=","hidden_size",":","raise","ValueError","(","\"The width of the input tensor (%d) != hidden size (%d)\"","%","(","input_width",",","hidden_size",")",")","# We keep the representation as a 2D tensor to avoid re-shaping it back and","# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on","# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to","# help the optimizer.","prev_output","=","reshape_to_matrix","(","input_tensor",")","all_layer_outputs","=","[","]","for","layer_idx","in","range","(","num_hidden_layers",")",":","with","tf",".","variable_scope","(","\"layer_%d\"","%","layer_idx",")",":","layer_input","=","prev_output","with","tf",".","variable_scope","(","\"attention\"",")",":","attention_heads","=","[","]","with","tf",".","variable_scope","(","\"self\"",")",":","attention_head","=","attention_layer","(","from_tensor","=","layer_input",",","to_tensor","=","layer_input",",","attention_mask","=","attention_mask",",","num_attention_heads","=","num_attention_heads",",","size_per_head","=","attention_head_size",",","attention_probs_dropout_prob","=","attention_probs_dropout_prob",",","initializer_range","=","initializer_range",",","do_return_2d_tensor","=","True",",","batch_size","=","batch_size",",","from_seq_length","=","seq_length",",","to_seq_length","=","seq_length",")","attention_heads",".","append","(","attention_head",")","attention_output","=","None","if","len","(","attention_heads",")","==","1",":","attention_output","=","attention_heads","[","0","]","else",":","# In the case where we have other sequences, we just concatenate","# them to the self-attention head before the projection.","attention_output","=","tf",".","concat","(","attention_heads",",","axis","=","-","1",")","# Run a linear projection of `hidden_size` then add a residual","# with `layer_input`.","with","tf",".","variable_scope","(","\"output\"",")",":","attention_output","=","tf",".","layers",".","dense","(","attention_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","attention_output","=","dropout","(","attention_output",",","hidden_dropout_prob",")","attention_output","=","layer_norm","(","attention_output","+","layer_input",")","# The activation is only applied to the \"intermediate\" hidden layer.","with","tf",".","variable_scope","(","\"intermediate\"",")",":","intermediate_output","=","tf",".","layers",".","dense","(","attention_output",",","intermediate_size",",","activation","=","intermediate_act_fn",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# Down-project back to `hidden_size` then add the 
residual.","with","tf",".","variable_scope","(","\"output\"",")",":","layer_output","=","tf",".","layers",".","dense","(","intermediate_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","layer_output","=","dropout","(","layer_output",",","hidden_dropout_prob",")","layer_output","=","layer_norm","(","layer_output","+","attention_output",")","prev_output","=","layer_output","all_layer_outputs",".","append","(","layer_output",")","if","do_return_all_layers",":","final_outputs","=","[","]","for","layer_output","in","all_layer_outputs",":","final_output","=","reshape_from_matrix","(","layer_output",",","input_shape",")","final_outputs",".","append","(","final_output",")","return","final_outputs","else",":","final_output","=","reshape_from_matrix","(","prev_output",",","input_shape",")","return","final_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L754-L892"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"get_shape_list","parameters":"(tensor, expected_rank=None, name=None)","argument_list":"","return_statement":"return shape","docstring":"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.","docstring_summary":"Returns a list of the shape of tensor, preferring static dimensions.","docstring_tokens":["Returns","a","list","of","the","shape","of","tensor","preferring","static","dimensions","."],"function":"def get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape","function_tokens":["def","get_shape_list","(","tensor",",","expected_rank","=","None",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","if","expected_rank","is","not","None",":","assert_rank","(","tensor",",","expected_rank",",","name",")","shape","=","tensor",".","shape",".","as_list","(",")","non_static_indexes","=","[","]","for","(","index",",","dim",")","in","enumerate","(","shape",")",":","if","dim","is","None",":","non_static_indexes",".","append","(","index",")","if","not","non_static_indexes",":","return","shape","dyn_shape","=","tf",".","shape","(","tensor",")","for","index","in","non_static_indexes",":","shape","[","index","]","=","dyn_shape","[","index","]","return","shape"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L895-L929"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"reshape_to_matrix","parameters":"(input_tensor)","argument_list":"","return_statement":"return output_tensor","docstring":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_summary":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_tokens":["Reshapes","a",">","=","rank","2","tensor","to","a","rank","2","tensor","(","i",".","e",".","a","matrix",")","."],"function":"def reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor","function_tokens":["def","reshape_to_matrix","(","input_tensor",")",":","ndims","=","input_tensor",".","shape",".","ndims","if","ndims","<","2",":","raise","ValueError","(","\"Input tensor must have at least rank 2. 
Shape = %s\"","%","(","input_tensor",".","shape",")",")","if","ndims","==","2",":","return","input_tensor","width","=","input_tensor",".","shape","[","-","1","]","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","-","1",",","width","]",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L932-L943"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"reshape_from_matrix","parameters":"(output_tensor, orig_shape_list)","argument_list":"","return_statement":"return tf.reshape(output_tensor, orig_dims + [width])","docstring":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_summary":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_tokens":["Reshapes","a","rank","2","tensor","back","to","its","original","rank",">","=","2","tensor","."],"function":"def reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])","function_tokens":["def","reshape_from_matrix","(","output_tensor",",","orig_shape_list",")",":","if","len","(","orig_shape_list",")","==","2",":","return","output_tensor","output_shape","=","get_shape_list","(","output_tensor",")","orig_dims","=","orig_shape_list","[","0",":","-","1","]","width","=","output_shape","[","-","1","]","return","tf",".","reshape","(","output_tensor",",","orig_dims","+","[","width","]",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L946-L956"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"assert_rank","parameters":"(tensor, expected_rank, name=None)","argument_list":"","return_statement":"","docstring":"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.","docstring_summary":"Raises an exception if the tensor rank is not of the expected rank.","docstring_tokens":["Raises","an","exception","if","the","tensor","rank","is","not","of","the","expected","rank","."],"function":"def assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = 
tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))","function_tokens":["def","assert_rank","(","tensor",",","expected_rank",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","expected_rank_dict","=","{","}","if","isinstance","(","expected_rank",",","six",".","integer_types",")",":","expected_rank_dict","[","expected_rank","]","=","True","else",":","for","x","in","expected_rank",":","expected_rank_dict","[","x","]","=","True","actual_rank","=","tensor",".","shape",".","ndims","if","actual_rank","not","in","expected_rank_dict",":","scope_name","=","tf",".","get_variable_scope","(",")",".","name","raise","ValueError","(","\"For the tensor `%s` in scope `%s`, the actual rank \"","\"`%d` (shape = %s) is not equal to the expected rank `%s`\"","%","(","name",",","scope_name",",","actual_rank",",","str","(","tensor",".","shape",")",",","str","(","expected_rank",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L959-L986"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"BertConfig.__init__","parameters":"(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range","function_tokens":["def","__init__","(","self",",","vocab_size",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","16",",","initializer_range","=","0.02",")",":","self",".","vocab_size","=","vocab_size","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L34-L80"} 
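A minimal, self-contained sketch of how the `BertConfig` hyperparameters documented in the record above feed the divisibility check that `transformer_model` performs before building the encoder. `MiniBertConfig` and `check_encoder_config` are hypothetical stand-ins defined here only so the snippet runs on its own; the real class is `BertConfig` in baselines/models/roberta/modeling.py, and no TensorFlow is required.

# MiniBertConfig is a hypothetical stand-in for BertConfig, kept to the
# fields this sketch actually uses.
class MiniBertConfig(object):
    def __init__(self, vocab_size, hidden_size=768, num_hidden_layers=12,
                 num_attention_heads=12, max_position_embeddings=512):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings


def check_encoder_config(config):
    # Mirrors the check at the top of transformer_model: the hidden size
    # must split evenly across the attention heads; each head then gets
    # hidden_size / num_attention_heads dimensions, which is the
    # `size_per_head` passed down to attention_layer.
    if config.hidden_size % config.num_attention_heads != 0:
        raise ValueError(
            "The hidden size (%d) is not a multiple of the number of "
            "attention heads (%d)" % (config.hidden_size,
                                      config.num_attention_heads))
    return config.hidden_size // config.num_attention_heads


config = MiniBertConfig(vocab_size=21128)  # vocab size here is illustrative
print(check_encoder_config(config))  # 768 over 12 heads -> 64 dims per head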
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"BertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size","=","None",")","for","(","key",",","value",")","in","six",".","iteritems","(","json_object",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L83-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"BertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","tf",".","gfile",".","GFile","(","json_file",",","\"r\"",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L91-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"BertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L97-L100"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"BertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L102-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"BertModel.__init__","parameters":"(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None)","argument_list":"","return_statement":"","docstring":"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.","docstring_summary":"Constructor for BertModel.","docstring_tokens":["Constructor","for","BertModel","."],"function":"def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (self.embedding_output, self.embedding_table) = embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n\n self.sequence_output = self.all_encoder_layers[-1] # [batch_size, seq_length, hidden_size]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.variable_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. 
We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = tf.layers.dense(\n first_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(config.initializer_range))","function_tokens":["def","__init__","(","self",",","config",",","is_training",",","input_ids",",","input_mask","=","None",",","token_type_ids","=","None",",","use_one_hot_embeddings","=","False",",","scope","=","None",")",":","config","=","copy",".","deepcopy","(","config",")","if","not","is_training",":","config",".","hidden_dropout_prob","=","0.0","config",".","attention_probs_dropout_prob","=","0.0","input_shape","=","get_shape_list","(","input_ids",",","expected_rank","=","2",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","if","input_mask","is","None",":","input_mask","=","tf",".","ones","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","if","token_type_ids","is","None",":","token_type_ids","=","tf",".","zeros","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","with","tf",".","variable_scope","(","scope",",","default_name","=","\"bert\"",")",":","with","tf",".","variable_scope","(","\"embeddings\"",")",":","# Perform embedding lookup on the word ids.","(","self",".","embedding_output",",","self",".","embedding_table",")","=","embedding_lookup","(","input_ids","=","input_ids",",","vocab_size","=","config",".","vocab_size",",","embedding_size","=","config",".","hidden_size",",","initializer_range","=","config",".","initializer_range",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# Add positional embeddings and token type embeddings, then layer","# normalize and perform dropout.","self",".","embedding_output","=","embedding_postprocessor","(","input_tensor","=","self",".","embedding_output",",","use_token_type","=","True",",","token_type_ids","=","token_type_ids",",","token_type_vocab_size","=","config",".","type_vocab_size",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","config",".","initializer_range",",","max_position_embeddings","=","config",".","max_position_embeddings",",","dropout_prob","=","config",".","hidden_dropout_prob",")","with","tf",".","variable_scope","(","\"encoder\"",")",":","# This converts a 2D mask of shape [batch_size, seq_length] to a 3D","# mask of shape [batch_size, seq_length, seq_length] which is used","# for the attention scores.","attention_mask","=","create_attention_mask_from_input_mask","(","input_ids",",","input_mask",")","# Run the stacked transformer.","# `sequence_output` shape = [batch_size, seq_length, 
hidden_size].","self",".","all_encoder_layers","=","transformer_model","(","input_tensor","=","self",".","embedding_output",",","attention_mask","=","attention_mask",",","hidden_size","=","config",".","hidden_size",",","num_hidden_layers","=","config",".","num_hidden_layers",",","num_attention_heads","=","config",".","num_attention_heads",",","intermediate_size","=","config",".","intermediate_size",",","intermediate_act_fn","=","get_activation","(","config",".","hidden_act",")",",","hidden_dropout_prob","=","config",".","hidden_dropout_prob",",","attention_probs_dropout_prob","=","config",".","attention_probs_dropout_prob",",","initializer_range","=","config",".","initializer_range",",","do_return_all_layers","=","True",")","self",".","sequence_output","=","self",".","all_encoder_layers","[","-","1","]","# [batch_size, seq_length, hidden_size]","# The \"pooler\" converts the encoded sequence tensor of shape","# [batch_size, seq_length, hidden_size] to a tensor of shape","# [batch_size, hidden_size]. This is necessary for segment-level","# (or segment-pair-level) classification tasks where we need a fixed","# dimensional representation of the segment.","with","tf",".","variable_scope","(","\"pooler\"",")",":","# We \"pool\" the model by simply taking the hidden state corresponding","# to the first token. We assume that this has been pre-trained","first_token_tensor","=","tf",".","squeeze","(","self",".","sequence_output","[",":",",","0",":","1",",",":","]",",","axis","=","1",")","self",".","pooled_output","=","tf",".","layers",".","dense","(","first_token_tensor",",","config",".","hidden_size",",","activation","=","tf",".","tanh",",","kernel_initializer","=","create_initializer","(","config",".","initializer_range",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L131-L232"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"BertModel.get_sequence_output","parameters":"(self)","argument_list":"","return_statement":"return self.sequence_output","docstring":"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.","docstring_summary":"Gets final hidden layer of encoder.","docstring_tokens":["Gets","final","hidden","layer","of","encoder","."],"function":"def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output","function_tokens":["def","get_sequence_output","(","self",")",":","return","self",".","sequence_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L237-L244"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/modeling.py","language":"python","identifier":"BertModel.get_embedding_output","parameters":"(self)","argument_list":"","return_statement":"return self.embedding_output","docstring":"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n 
embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.","docstring_summary":"Gets output of the embedding lookup (i.e., input to the transformer).","docstring_tokens":["Gets","output","of","the","embedding","lookup","(","i",".","e",".","input","to","the","transformer",")","."],"function":"def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output","function_tokens":["def","get_embedding_output","(","self",")",":","return","self",".","embedding_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/modeling.py#L249-L258"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"convert_example_list_for_inews","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature_list","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_example_list_for_inews(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return [InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)]\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n must_len = len(tokens_a) + 3\n extra_len = max_seq_length - must_len\n feature_list = []\n if example.text_b and extra_len > 0:\n extra_num = int((len(tokens_b) - 1) \/ extra_len) + 1\n for num in range(extra_num):\n max_len = min((num + 1) * extra_len, len(tokens_b))\n tokens_b_sub = tokens_b[num * extra_len: max_len]\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b_sub, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n else:\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n return 
feature_list","function_tokens":["def","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","[","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","]","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","must_len","=","len","(","tokens_a",")","+","3","extra_len","=","max_seq_length","-","must_len","feature_list","=","[","]","if","example",".","text_b","and","extra_len",">","0",":","extra_num","=","int","(","(","len","(","tokens_b",")","-","1",")","\/","extra_len",")","+","1","for","num","in","range","(","extra_num",")",":","max_len","=","min","(","(","num","+","1",")","*","extra_len",",","len","(","tokens_b",")",")","tokens_b_sub","=","tokens_b","[","num","*","extra_len",":","max_len","]","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b_sub",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","else",":","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","return","feature_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L238-L273"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features_for_inews","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features_for_inews(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n num_example = 0\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature_list = convert_example_list_for_inews(ex_index, example, label_list,\n max_seq_length, tokenizer)\n num_example += len(feature_list)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n for feature in feature_list:\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n 
features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"feature num: %s\", num_example)\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features_for_inews","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","num_example","=","0","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","1000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature_list","=","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","num_example","+=","len","(","feature_list",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","for","feature","in","feature_list",":","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","tf",".","logging",".","info","(","\"feature num: %s\"",",","num_example",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L276-L306"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"convert_single_example","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the 
total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return 
feature","function_tokens":["def","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","max_seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","max_seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","label_id","=","label_map","[","example",".","label","]","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","tf",".","logging",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label_id",")",")","feature","=","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",",","is_real_example","=","True",")","return","feature"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L309-L408"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n 
writer.close()","function_tokens":["def","file_based_convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L411-L438"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"file_based_input_fn_builder","parameters":"(input_file, seq_length, is_training,\n drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = 
tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","file_based_input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"label_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"is_real_example\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","}","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L441-L486"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L489-L503"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs,\n axis=-1) # todo 08-29 try temp-loss\n ###############bi_tempered_logistic_loss############################################################################\n # print(\"##cross entropy loss is used....\"); tf.logging.info(\"##cross entropy loss is used....\")\n # t1=0.9 #t1=0.90\n # t2=1.05 #t2=1.05\n # 
per_example_loss=bi_tempered_logistic_loss(log_probs,one_hot_labels,t1,t2,label_smoothing=0.1,num_iters=5) # TODO label_smoothing=0.0\n # tf.logging.info(\"per_example_loss:\"+str(per_example_loss.shape))\n ##############bi_tempered_logistic_loss#############################################################################\n\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use model.get_sequence_output()","# instead.","output_layer","=","model",".","get_pooled_output","(",")","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","# todo 08-29 try temp-loss","###############bi_tempered_logistic_loss############################################################################","# print(\"##cross entropy loss is used....\"); tf.logging.info(\"##cross entropy loss is used....\")","# t1=0.9 #t1=0.90","# t2=1.05 #t2=1.05","# per_example_loss=bi_tempered_logistic_loss(log_probs,one_hot_labels,t1,t2,label_smoothing=0.1,num_iters=5) # TODO label_smoothing=0.0","# tf.logging.info(\"per_example_loss:\"+str(per_example_loss.shape))","##############bi_tempered_logistic_loss#############################################################################","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L506-L557"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, 
num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return 
model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","num_labels",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_real_example","=","None","if","\"is_real_example\"","in","features",":","is_real_example","=","tf",".","cast","(","features","[","\"is_real_example\"","]",",","dtype","=","tf",".","float32",")","else",":","is_real_example","=","tf",".","ones","(","tf",".","shape","(","label_ids",")",",","dtype","=","tf",".","float32",")","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",",","is_real_example",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","label_ids",",","predictions","=","predictions",",","weights","=","is_real_example",")","loss","=","tf",".","metrics",".","mean","(","values","=","per_example_loss",",","weights","=","is_real_example",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits",",","is_real_example","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L560-L649"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",",","is_training",",","drop_remainder",")",":","all_input_ids","=","[","]","all_input_mask","=","[","]","all_segment_ids","=","[","]","all_label_ids","=","[","]","for","feature","in","features",":","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_segment_ids",".","append","(","feature",".","segment_ids",")","all_label_ids",".","append","(","feature",".","label_id",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"segment_ids\"",":","tf",".","constant","(","all_segment_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"label_ids\"",":","tf",".","constant","(","all_label_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","}",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L654-L703"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_classifier.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return features","docstring":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_summary":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","list","of","InputFeatures","."],"function":"def convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 
10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","features",".","append","(","feature",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_classifier.py#L708-L721"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.98, # 0.98 ONLY USED FOR PRETRAIN. 
MUST CHANGE AT FINE-TUNING 0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n\n # tvars=find_train_variables(tvars) # fix parameters from layer 0 to layer9.\n\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","AdamWeightDecayOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.98",",","# 0.98 ONLY USED FOR PRETRAIN. 
MUST CHANGE AT FINE-TUNING 0.999,","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","# tvars=find_train_variables(tvars) # fix parameters from layer 0 to layer9.","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization.py#L25-L87"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization.py","language":"python","identifier":"find_train_variables","parameters":"(tvars)","argument_list":"","return_statement":"return tvars_result_list","docstring":"Get the trainable variables for layer 9 through the last layer only.\n :param tvars: a list\n :return: a new tvars, which is a list","docstring_summary":"Get the trainable variables for layer 9 through the last layer only.\n :param tvars: a list\n :return: a new tvars, which is a list","docstring_tokens":["Get","the","trainable","variables","for","layer","9","through","the","last","layer","only",".",":","param","tvars",":","a","list",":","return",":","a","new","tvars","which","is","a","list"],"function":"def find_train_variables(tvars):\n \"\"\"\n Get the trainable variables for layer 9 through the last layer only.\n :param tvars: a list\n :return: a new tvars, which is a list\n \"\"\"\n # bert\/encoder\/layer_21, bert\/encoder\/layer_9, bert\/encoder\/layer_20\/attention\/output\/dense\/bias:0, bert\/encoder\/layer_20\/attention\/output\/dense\/kernel:\n tvars_result_list=[]\n\n for var in tvars:\n if 'cls\/predictions' in var.name or 'bert\/pooler\/dense' in var.name: # the last few layers\n tvars_result_list.append(var)\n else: # parameters in the upper half of the network\n layer_number_list=re.findall(\"layer_(.+?)\/\", var.name)\n if len(layer_number_list)>0 and layer_number_list[0].isdigit(): # a layer number was matched (re.findall returns strings)\n layer_number=int(layer_number_list[0])\n if layer_number>=9:\n tvars_result_list.append(var)\n\n # print train variables\n for i,var_ in enumerate(tvars_result_list):\n print(\"####find_train_variables.i:\",i, \"variable name:\",var_.name)\n\n print(\"####find_train_variables:length of tvars_result_list:\",len(tvars_result_list))\n return tvars_result_list","function_tokens":["def","find_train_variables","(","tvars",")",":","# bert\/encoder\/layer_21, bert\/encoder\/layer_9, bert\/encoder\/layer_20\/attention\/output\/dense\/bias:0, 
bert\/encoder\/layer_20\/attention\/output\/dense\/kernel:","tvars_result_list","=","[","]","for","var","in","tvars",":","if","'cls\/predictions'","in","var",".","name","or","'bert\/pooler\/dense'","in","var",".","name",":","# \u6700\u540e\u51e0\u5c42","tvars_result_list",".","append","(","var",")","else",":","# \u540e\u534a\u4e2a\u7f51\u7edc\u7684\u53c2\u6570","layer_number_list","=","re",".","findall","(","\"layer_(.+?)\/\"",",","var",".","name",")","if","len","(","layer_number_list",")",">","0","and","isinstance","(","layer_number_list","[","0","]",",","int",")",":","# \u5339\u914d\u5230\u4e86\u4e00\u4e2a\u6570\u5b57","layer_number","=","int","(","layer_number_list","[","0","]",")","if","layer_number",">=","9",":","tvars_result_list",".","append","(","var",")","# print train variables","for","i",",","var_","in","enumerate","(","tvars_result_list",")",":","print","(","\"####find_train_variables.i:\"",",","i",",","\"variable name:\"",",","var_",".","name",")","print","(","\"####find_train_variables:length of tvars_result_list:\"",",","tvars_result_list",")","return","tvars_result_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization.py#L89-L113"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization.py#L119-L135"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See 
base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
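
Editor's note: the find_train_variables record above (baselines/models/roberta/optimization.py, intended to freeze layers 0-8 and fine-tune layer 9 upward) appears to contain a bug: re.findall returns strings, so isinstance(layer_number_list[0], int) is always False and no encoder-layer variable is ever selected; the final print also passes the list itself where its length is meant. A corrected sketch, assuming TF1-style variables whose .name embeds bert/encoder/layer_N/:

import re

def find_train_variables_fixed(tvars, min_layer=9):
    """Keep output heads plus encoder layers >= min_layer (sketch, not the repo's code)."""
    result = []
    for var in tvars:
        if 'cls/predictions' in var.name or 'bert/pooler/dense' in var.name:
            result.append(var)
        else:
            match = re.findall(r"layer_(\d+)/", var.name)
            # re.findall yields strings, so convert with int() instead of
            # testing isinstance(..., int), which can never be True here.
            if match and int(match[0]) >= min_layer:
                result.append(var)
    print("kept %d of %d trainable variables" % (len(result), len(tvars)))
    return result
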
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization.py#L137-L186"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization.py#L188-L196"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization.py#L198-L203"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization_finetuning.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return 
train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999, # 0.98 ONLY USED FOR PRETRAIN. MUST CHANGE AT FINE-TUNING 0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. 
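
Editor's note: the _do_use_weight_decay and _get_variable_name records above disable decay for any parameter whose name matches one of the exclusion regexes (after _get_variable_name strips the ":0" tensor suffix). A small self-contained demo of which names receive decay under the default exclusions, ignoring the weight_decay_rate==0 early-out:

import re

EXCLUDE = ["LayerNorm", "layer_norm", "bias"]

def do_use_weight_decay(param_name, exclude=EXCLUDE):
    # Mirrors the record above: any regex hit disables decay for that parameter.
    return not any(re.search(r, param_name) for r in exclude)

print(do_use_weight_decay("bert/encoder/layer_0/attention/self/query/kernel"))    # True
print(do_use_weight_decay("bert/encoder/layer_0/attention/output/LayerNorm/gamma"))  # False
print(do_use_weight_decay("bert/pooler/dense/bias"))                              # False
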
I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","AdamWeightDecayOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","# 0.98 ONLY USED FOR PRETRAIN. MUST CHANGE AT FINE-TUNING 0.999,","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. 
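
Editor's note: for context, a typical call site for create_optimizer inside a TF1 Estimator model_fn might look as follows; this is a hypothetical sketch (total_loss, mode, and the numeric values are illustrative, not taken from the repo):

# Hypothetical call site (TF1 Estimator style); numbers are illustrative.
train_op = create_optimizer(
    loss=total_loss,
    init_lr=2e-5,
    num_train_steps=10000,
    num_warmup_steps=1000,   # often ~10% of num_train_steps
    use_tpu=False)
output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_op)
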
But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization_finetuning.py#L25-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization_finetuning.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization_finetuning.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization_finetuning.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - 
self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
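
Editor's note: in equations, the update implemented by apply_gradients above is (note there is no Adam bias correction, i.e. no 1/(1-\beta^t) terms, matching the extracted code):

m_t = \beta_1 m_{t-1} + (1-\beta_1) g_t, \qquad
v_t = \beta_2 v_{t-1} + (1-\beta_2) g_t^2

\theta_t = \theta_{t-1} - \eta \left( \frac{m_t}{\sqrt{v_t} + \epsilon} + \lambda\, \theta_{t-1} \right)

with \lambda = weight_decay_rate applied only to non-excluded parameters. This is the decoupled ("AdamW") form: adding \lambda \lVert\theta\rVert^2 / 2 to the loss instead would leak the decay term into m and v, which is exactly what the comment above warns against.
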
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization_finetuning.py#L108-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization_finetuning.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization_finetuning.py#L159-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/optimization_finetuning.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/optimization_finetuning.py#L169-L174"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"write_instance_to_example_files","parameters":"(instances, tokenizer, max_seq_length,\n 
max_predictions_per_seq, output_files)","argument_list":"","return_statement":"","docstring":"Create TF example files from `TrainingInstance`s.","docstring_summary":"Create TF example files from `TrainingInstance`s.","docstring_tokens":["Create","TF","example","files","from","TrainingInstance","s","."],"function":"def write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n # print(\"length of segment_ids:\",len(segment_ids),\"max_seq_length:\", max_seq_length)\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", 
total_written)","function_tokens":["def","write_instance_to_example_files","(","instances",",","tokenizer",",","max_seq_length",",","max_predictions_per_seq",",","output_files",")",":","writers","=","[","]","for","output_file","in","output_files",":","writers",".","append","(","tf",".","python_io",".","TFRecordWriter","(","output_file",")",")","writer_index","=","0","total_written","=","0","for","(","inst_index",",","instance",")","in","enumerate","(","instances",")",":","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","tokens",")","input_mask","=","[","1","]","*","len","(","input_ids",")","segment_ids","=","list","(","instance",".","segment_ids",")","assert","len","(","input_ids",")","<=","max_seq_length","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","# print(\"length of segment_ids:\",len(segment_ids),\"max_seq_length:\", max_seq_length)","assert","len","(","segment_ids",")","==","max_seq_length","masked_lm_positions","=","list","(","instance",".","masked_lm_positions",")","masked_lm_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","masked_lm_labels",")","masked_lm_weights","=","[","1.0","]","*","len","(","masked_lm_ids",")","while","len","(","masked_lm_positions",")","<","max_predictions_per_seq",":","masked_lm_positions",".","append","(","0",")","masked_lm_ids",".","append","(","0",")","masked_lm_weights",".","append","(","0.0",")","next_sentence_label","=","1","if","instance",".","is_random_next","else","0","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","segment_ids",")","features","[","\"masked_lm_positions\"","]","=","create_int_feature","(","masked_lm_positions",")","features","[","\"masked_lm_ids\"","]","=","create_int_feature","(","masked_lm_ids",")","features","[","\"masked_lm_weights\"","]","=","create_float_feature","(","masked_lm_weights",")","features","[","\"next_sentence_labels\"","]","=","create_int_feature","(","[","next_sentence_label","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writers","[","writer_index","]",".","write","(","tf_example",".","SerializeToString","(",")",")","writer_index","=","(","writer_index","+","1",")","%","len","(","writers",")","total_written","+=","1","if","inst_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","instance",".","tokens","]",")",")","for","feature_name","in","features",".","keys","(",")",":","feature","=","features","[","feature_name","]","values","=","[","]","if","feature",".","int64_list",".","value",":","values","=","feature",".","int64_list",".","value","elif","feature",".","float_list",".","value",":","values","=","feature",".","float_list",".","value","tf",".","logging",".","info","(","\"%s: %s\"","%","(","feature_name",",","\" 
\"",".","join","(","[","str","(","x",")","for","x","in","values","]",")",")",")","for","writer","in","writers",":","writer",".","close","(",")","tf",".","logging",".","info","(","\"Wrote %d total instances\"",",","total_written",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L98-L169"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"create_training_instances","parameters":"(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng)","argument_list":"","return_statement":"return instances","docstring":"Create `TrainingInstance`s from raw text.","docstring_summary":"Create `TrainingInstance`s from raw text.","docstring_tokens":["Create","TrainingInstance","s","from","raw","text","."],"function":"def create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n print(\"create_training_instances.started...\")\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline().replace(\"\",\"\"))# .replace(\"\u201d\",\"\")) # \u5c06\u3001\u201d\u66ff\u6362\u6389\u3002\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng))\n\n rng.shuffle(instances)\n print(\"create_training_instances.ended...\")\n\n return instances","function_tokens":["def","create_training_instances","(","input_files",",","tokenizer",",","max_seq_length",",","dupe_factor",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","rng",")",":","all_documents","=","[","[","]","]","# Input file format:","# (1) One sentence per line. These should ideally be actual sentences, not","# entire paragraphs or arbitrary spans of text. (Because we use the","# sentence boundaries for the \"next sentence prediction\" task).","# (2) Blank lines between documents. 
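
Editor's note: the write_instance_to_example_files record above pads every feature to a fixed length and round-robins serialized tf.train.Example protos across the output shards. A minimal self-contained sketch of the padding and serialization core; create_int_feature/create_float_feature exist in the repo's create_pretraining_data.py and are re-stated here, and the token ids are illustrative only:

import collections
import tensorflow as tf

def create_int_feature(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

def create_float_feature(values):
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))

def pad_to(values, length, pad_value):
    # Fixed-length features keep TFRecord parsing shape-static.
    return values + [pad_value] * (length - len(values))

input_ids = pad_to([101, 2769, 102], length=8, pad_value=0)  # illustrative ids
features = collections.OrderedDict(
    input_ids=create_int_feature(input_ids),
    input_mask=create_int_feature(pad_to([1, 1, 1], 8, 0)),
)
example = tf.train.Example(features=tf.train.Features(feature=features))
serialized = example.SerializeToString()
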
Document boundaries are needed so","# that the \"next sentence prediction\" task doesn't span between documents.","print","(","\"create_training_instances.started...\"",")","for","input_file","in","input_files",":","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",".","replace","(","\"\"",",","\"\"",")",")","# .replace(\"\u201d\",\"\")) # \u5c06\u3001\u201d\u66ff\u6362\u6389\u3002","if","not","line",":","break","line","=","line",".","strip","(",")","# Empty lines are used as document delimiters","if","not","line",":","all_documents",".","append","(","[","]",")","tokens","=","tokenizer",".","tokenize","(","line",")","if","tokens",":","all_documents","[","-","1","]",".","append","(","tokens",")","# Remove empty documents","all_documents","=","[","x","for","x","in","all_documents","if","x","]","rng",".","shuffle","(","all_documents",")","vocab_words","=","list","(","tokenizer",".","vocab",".","keys","(",")",")","instances","=","[","]","for","_","in","range","(","dupe_factor",")",":","for","document_index","in","range","(","len","(","all_documents",")",")",":","instances",".","extend","(","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",")","rng",".","shuffle","(","instances",")","print","(","\"create_training_instances.ended...\"",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L182-L226"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"_is_chinese_char","parameters":"(cp)","argument_list":"","return_statement":"","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True","function_tokens":["def","_is_chinese_char","(","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. 
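
Editor's note: the _is_chinese_char record above tests a codepoint against the CJK Unified Ideograph blocks; as extracted here, non-CJK input falls through and returns None (falsy) rather than an explicit False. A quick usage sketch:

print(_is_chinese_char(ord("中")))   # True: U+4E2D lies in 0x4E00-0x9FFF
print(_is_chinese_char(ord("A")))    # None (falsy): Latin letters are outside the CJK blocks
print(_is_chinese_char(ord("한")))   # None (falsy): Hangul lives in a separate Unicode block
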
The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like the all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L229-L247"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"get_new_segment","parameters":"(segment)","argument_list":"","return_statement":"return new_segment","docstring":"\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd","docstring_summary":"\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd","docstring_tokens":["\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd",":","\u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0","(","#",")","\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002",":","param","segment",":","\u4e00\u53e5\u8bdd",":","return",":","\u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd"],"function":"def get_new_segment(segment): # \u65b0\u589e\u7684\u65b9\u6cd5 ####\n \"\"\"\n \u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd\n \"\"\"\n seq_cws = jieba.lcut(\"\".join(segment))\n seq_cws_dict = {x: 1 for x in seq_cws}\n new_segment = []\n i = 0\n while i < len(segment):\n if len(re.findall('[\\u4E00-\\u9FA5]', segment[i]))==0: # 
\u4e0d\u662f\u4e2d\u6587\u7684\uff0c\u539f\u6587\u52a0\u8fdb\u53bb\u3002\n new_segment.append(segment[i])\n i += 1\n continue\n\n has_add = False\n for length in range(3,0,-1):\n if i+length>len(segment):\n continue\n if ''.join(segment[i:i+length]) in seq_cws_dict:\n new_segment.append(segment[i])\n for l in range(1, length):\n new_segment.append('##' + segment[i+l])\n i += length\n has_add = True\n break\n if not has_add:\n new_segment.append(segment[i])\n i += 1\n return new_segment","function_tokens":["def","get_new_segment","(","segment",")",":","# \u65b0\u589e\u7684\u65b9\u6cd5 ####","seq_cws","=","jieba",".","lcut","(","\"\"",".","join","(","segment",")",")","seq_cws_dict","=","{","x",":","1","for","x","in","seq_cws","}","new_segment","=","[","]","i","=","0","while","i","<","len","(","segment",")",":","if","len","(","re",".","findall","(","'[\\u4E00-\\u9FA5]'",",","segment","[","i","]",")",")","==","0",":","# \u4e0d\u662f\u4e2d\u6587\u7684\uff0c\u539f\u6587\u52a0\u8fdb\u53bb\u3002","new_segment",".","append","(","segment","[","i","]",")","i","+=","1","continue","has_add","=","False","for","length","in","range","(","3",",","0",",","-","1",")",":","if","i","+","length",">","len","(","segment",")",":","continue","if","''",".","join","(","segment","[","i",":","i","+","length","]",")","in","seq_cws_dict",":","new_segment",".","append","(","segment","[","i","]",")","for","l","in","range","(","1",",","length",")",":","new_segment",".","append","(","'##'","+","segment","[","i","+","l","]",")","i","+=","length","has_add","=","True","break","if","not","has_add",":","new_segment",".","append","(","segment","[","i","]",")","i","+=","1","return","new_segment"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L250-L280"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"get_raw_instance","parameters":"(document,max_sequence_length)","argument_list":"","return_statement":"return result_list","docstring":"\u83b7\u53d6\u521d\u6b65\u7684\u8bad\u7ec3\u5b9e\u4f8b\uff0c\u5c06\u6574\u6bb5\u6309\u7167max_sequence_length\u5207\u5206\u6210\u591a\u4e2a\u90e8\u5206,\u5e76\u4ee5\u591a\u4e2a\u5904\u7406\u597d\u7684\u5b9e\u4f8b\u7684\u5f62\u5f0f\u8fd4\u56de\u3002\n :param document: \u4e00\u6574\u6bb5\n :param max_sequence_length:\n :return: a list. each element is a sequence of text","docstring_summary":"\u83b7\u53d6\u521d\u6b65\u7684\u8bad\u7ec3\u5b9e\u4f8b\uff0c\u5c06\u6574\u6bb5\u6309\u7167max_sequence_length\u5207\u5206\u6210\u591a\u4e2a\u90e8\u5206,\u5e76\u4ee5\u591a\u4e2a\u5904\u7406\u597d\u7684\u5b9e\u4f8b\u7684\u5f62\u5f0f\u8fd4\u56de\u3002\n :param document: \u4e00\u6574\u6bb5\n :param max_sequence_length:\n :return: a list. 
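
Editor's note: the get_new_segment record above (its Chinese docstring translates roughly as "given a sentence, return it processed for Chinese whole-word masking: non-initial characters of a word are tagged with '#' so downstream code knows which characters belong to the same word") re-segments the character sequence with jieba and prefixes non-initial characters of a matched word with "##". A hedged, self-contained sketch that mirrors the record, including the 3-character longest-match lookahead; it assumes the jieba package is installed:

import re
import jieba

def whole_word_tags(chars):
    """Mark non-initial chars of jieba words with '##' (sketch of get_new_segment)."""
    words = set(jieba.lcut("".join(chars)))
    out, i = [], 0
    while i < len(chars):
        if not re.findall(u'[\u4E00-\u9FA5]', chars[i]):  # non-Chinese passes through untouched
            out.append(chars[i]); i += 1; continue
        for length in range(3, 0, -1):  # longest match first, up to 3 characters
            if i + length <= len(chars) and "".join(chars[i:i + length]) in words:
                out.append(chars[i])
                out.extend('##' + c for c in chars[i + 1:i + length])
                i += length
                break
        else:
            out.append(chars[i]); i += 1  # no word matched: keep the bare character
    return out

# e.g. whole_word_tags(list("我喜欢吃苹果")) might yield
# ['我', '喜', '##欢', '吃', '苹', '##果'], depending on jieba's dictionary.

Note that words longer than three characters never match the lookahead, so their characters stay untagged; that quirk is inherited from the original code.
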
each element is a sequence of text","docstring_tokens":["\u83b7\u53d6\u521d\u6b65\u7684\u8bad\u7ec3\u5b9e\u4f8b\uff0c\u5c06\u6574\u6bb5\u6309\u7167max_sequence_length\u5207\u5206\u6210\u591a\u4e2a\u90e8\u5206","\u5e76\u4ee5\u591a\u4e2a\u5904\u7406\u597d\u7684\u5b9e\u4f8b\u7684\u5f62\u5f0f\u8fd4\u56de\u3002",":","param","document",":","\u4e00\u6574\u6bb5",":","param","max_sequence_length",":",":","return",":","a","list",".","each","element","is","a","sequence","of","text"],"function":"def get_raw_instance(document,max_sequence_length): # \u65b0\u589e\u7684\u65b9\u6cd5\n \"\"\"\n \u83b7\u53d6\u521d\u6b65\u7684\u8bad\u7ec3\u5b9e\u4f8b\uff0c\u5c06\u6574\u6bb5\u6309\u7167max_sequence_length\u5207\u5206\u6210\u591a\u4e2a\u90e8\u5206,\u5e76\u4ee5\u591a\u4e2a\u5904\u7406\u597d\u7684\u5b9e\u4f8b\u7684\u5f62\u5f0f\u8fd4\u56de\u3002\n :param document: \u4e00\u6574\u6bb5\n :param max_sequence_length:\n :return: a list. each element is a sequence of text\n \"\"\"\n max_sequence_length_allowed=max_sequence_length-2\n document = [seq for seq in document if len(seq)max_sequence_length_allowed\/2: # \/2\n result_list.append(curr_seq)\n\n # # \u8ba1\u7b97\u603b\u5171\u53ef\u4ee5\u5f97\u5230\u591a\u5c11\u4efd\n # num_instance=int(len(big_list)\/max_sequence_length_allowed)+1\n # print(\"num_instance:\",num_instance)\n # # \u5207\u5206\u6210\u591a\u4efd\uff0c\u6dfb\u52a0\u5230\u5217\u8868\u4e2d\n # result_list=[]\n # for j in range(num_instance):\n # index=j*max_sequence_length_allowed\n # end_index=index+max_sequence_length_allowed if j!=num_instance-1 else -1\n # result_list.append(big_list[index:end_index])\n return result_list","function_tokens":["def","get_raw_instance","(","document",",","max_sequence_length",")",":","# \u65b0\u589e\u7684\u65b9\u6cd5","max_sequence_length_allowed","=","max_sequence_length","-","2","document","=","[","seq","for","seq","in","document","if","len","(","seq",")","<","max_sequence_length_allowed","]","sizes","=","[","len","(","seq",")","for","seq","in","document","]","result_list","=","[","]","curr_seq","=","[","]","# \u5f53\u524d\u5904\u7406\u7684\u5e8f\u5217","sz_idx","=","0","while","sz_idx","<","len","(","sizes",")",":","# \u5f53\u524d\u53e5\u5b50\u52a0\u4e0a\u65b0\u7684\u53e5\u5b50\uff0c\u5982\u679c\u957f\u5ea6\u5c0f\u4e8e\u6700\u5927\u9650\u5236\uff0c\u5219\u5408\u5e76\u5f53\u524d\u53e5\u5b50\u548c\u65b0\u53e5\u5b50\uff1b\u5426\u5219\u5373\u8d85\u8fc7\u4e86\u6700\u5927\u9650\u5236\uff0c\u90a3\u4e48\u505a\u4e3a\u4e00\u4e2a\u65b0\u7684\u5e8f\u5217\u52a0\u5230\u76ee\u6807\u5217\u8868\u4e2d","if","len","(","curr_seq",")","+","sizes","[","sz_idx","]","<=","max_sequence_length_allowed",":","# or len(curr_seq)==0:","curr_seq","+=","document","[","sz_idx","]","sz_idx","+=","1","else",":","result_list",".","append","(","curr_seq",")","curr_seq","=","[","]","# \u5bf9\u6700\u540e\u4e00\u4e2a\u5e8f\u5217\u8fdb\u884c\u5904\u7406\uff0c\u5982\u679c\u592a\u77ed\u7684\u8bdd\uff0c\u4e22\u5f03\u6389\u3002","if","len","(","curr_seq",")",">","max_sequence_length_allowed","\/","2",":","# \/2","result_list",".","append","(","curr_seq",")","# # \u8ba1\u7b97\u603b\u5171\u53ef\u4ee5\u5f97\u5230\u591a\u5c11\u4efd","# num_instance=int(len(big_list)\/max_sequence_length_allowed)+1","# print(\"num_instance:\",num_instance)","# # \u5207\u5206\u6210\u591a\u4efd\uff0c\u6dfb\u52a0\u5230\u5217\u8868\u4e2d","# result_list=[]","# for j in range(num_instance):","# index=j*max_sequence_length_allowed","# end_index=index+max_sequence_length_allowed if j!=num_instance-1 else -1","# 
result_list.append(big_list[index:end_index])","return","result_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L282-L317"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document","parameters":"( # \u65b0\u589e\u7684\u65b9\u6cd5\n # \u76ee\u6807\u6309\u7167RoBERTa\u7684\u601d\u8def\uff0c\u4f7f\u7528DOC-SENTENCES\uff0c\u5e76\u4f1a\u53bb\u6389NSP\u4efb\u52a1: \u4ece\u4e00\u4e2a\u6587\u6863\u4e2d\u8fde\u7eed\u7684\u83b7\u5f97\u6587\u672c\uff0c\u76f4\u5230\u8fbe\u5230\u6700\u5927\u957f\u5ea6\u3002\u5982\u679c\u662f\u4ece\u4e0b\u4e00\u4e2a\u6587\u6863\u4e2d\u83b7\u5f97\uff0c\u90a3\u4e48\u52a0\u4e0a\u4e00\u4e2a\u5206\u9694\u7b26\n # document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.\n # \u7ed9\u5b9a\u4e00\u4e2adocument\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u751f\u6210\u4e00\u4e9binstance.\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document( # \u65b0\u589e\u7684\u65b9\u6cd5\n # \u76ee\u6807\u6309\u7167RoBERTa\u7684\u601d\u8def\uff0c\u4f7f\u7528DOC-SENTENCES\uff0c\u5e76\u4f1a\u53bb\u6389NSP\u4efb\u52a1: \u4ece\u4e00\u4e2a\u6587\u6863\u4e2d\u8fde\u7eed\u7684\u83b7\u5f97\u6587\u672c\uff0c\u76f4\u5230\u8fbe\u5230\u6700\u5927\u957f\u5ea6\u3002\u5982\u679c\u662f\u4ece\u4e0b\u4e00\u4e2a\u6587\u6863\u4e2d\u83b7\u5f97\uff0c\u90a3\u4e48\u52a0\u4e0a\u4e00\u4e2a\u5206\u9694\u7b26\n # document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.\n # \u7ed9\u5b9a\u4e00\u4e2adocument\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u751f\u6210\u4e00\u4e9binstance.\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. 
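
Editor's note: the "function" field of the get_raw_instance record above appears extraction-damaged (the "<" comparisons and the chunk-accumulation loop were eaten, leaving "len(seq)max_sequence_length_allowed/2"), while function_tokens preserves the full logic. Its Chinese docstring translates roughly as "build preliminary training instances by splitting a document into parts of at most max_sequence_length, returned as a list". A clean sketch of that logic:

def pack_document(document, max_sequence_length):
    """Greedy sentence packing (sketch of get_raw_instance)."""
    budget = max_sequence_length - 2           # reserve room for [CLS] and [SEP]
    document = [s for s in document if len(s) < budget]
    chunks, curr = [], []
    for sentence in document:
        if len(curr) + len(sentence) <= budget:
            curr += sentence                   # still fits: extend the current chunk
        else:
            chunks.append(curr)                # flush and start a new chunk
            curr = list(sentence)
    if len(curr) > budget / 2:                 # drop a trailing chunk that is too short
        chunks.append(curr)
    return chunks
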
However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n\n #target_seq_length = max_num_tokens\n #if rng.random() < short_seq_prob:\n # target_seq_length = rng.randint(2, max_num_tokens)\n\n instances = []\n raw_text_list_list=get_raw_instance(document, max_seq_length) # document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.\n for j, raw_text_list in enumerate(raw_text_list_list):\n ####################################################################################################################\n raw_text_list = get_new_segment(raw_text_list) # \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d\n # 1\u3001\u8bbe\u7f6etoken, segment_ids\n is_random_next=True # this will not be used, so it's value doesn't matter\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in raw_text_list:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n ################################################################################################################\n # 2\u3001\u8c03\u7528\u539f\u6709\u7684\u65b9\u6cd5\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n\n return instances","function_tokens":["def","create_instances_from_document","(","# \u65b0\u589e\u7684\u65b9\u6cd5","# \u76ee\u6807\u6309\u7167RoBERTa\u7684\u601d\u8def\uff0c\u4f7f\u7528DOC-SENTENCES\uff0c\u5e76\u4f1a\u53bb\u6389NSP\u4efb\u52a1: \u4ece\u4e00\u4e2a\u6587\u6863\u4e2d\u8fde\u7eed\u7684\u83b7\u5f97\u6587\u672c\uff0c\u76f4\u5230\u8fbe\u5230\u6700\u5927\u957f\u5ea6\u3002\u5982\u679c\u662f\u4ece\u4e0b\u4e00\u4e2a\u6587\u6863\u4e2d\u83b7\u5f97\uff0c\u90a3\u4e48\u52a0\u4e0a\u4e00\u4e2a\u5206\u9694\u7b26","# document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.","# \u7ed9\u5b9a\u4e00\u4e2adocument\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u751f\u6210\u4e00\u4e9binstance.","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. 
However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","#target_seq_length = max_num_tokens","#if rng.random() < short_seq_prob:","# target_seq_length = rng.randint(2, max_num_tokens)","instances","=","[","]","raw_text_list_list","=","get_raw_instance","(","document",",","max_seq_length",")","# document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.","for","j",",","raw_text_list","in","enumerate","(","raw_text_list_list",")",":","####################################################################################################################","raw_text_list","=","get_new_segment","(","raw_text_list",")","# \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d","# 1\u3001\u8bbe\u7f6etoken, segment_ids","is_random_next","=","True","# this will not be used, so it's value doesn't matter","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","raw_text_list",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","################################################################################################################","# 2\u3001\u8c03\u7528\u539f\u6709\u7684\u65b9\u6cd5","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L319-L372"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document_original","parameters":"(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document_original(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. 
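
Editor's note: translating the Chinese comments in the create_instances_from_document record above: "following RoBERTa, use DOC-SENTENCES and drop the NSP task — take contiguous text from one document until the maximum length is reached; a document is a passage made of several sentences (segments)". Because NSP is dropped, is_random_next is set to an unused placeholder. A tiny runnable sketch of how one packed, whole-word-tagged chunk becomes model input (the masking step, create_masked_lm_predictions in the repo, is applied afterwards):

chunk = ['我', '喜', '##欢', '吃', '苹', '##果']   # output of whole-word tagging (sketch above)
tokens = ["[CLS]"] + chunk + ["[SEP]"]
segment_ids = [0] * len(tokens)                  # a single segment throughout: NSP is dropped
assert len(tokens) == len(segment_ids) == 8
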
However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n print(\"document_index:\",document_index,\"document:\",type(document),\" ;document:\",document) # document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.\n while i < len(document):\n segment = document[i] # \u53d6\u5230\u4e00\u4e2a\u90e8\u5206\uff08\u53ef\u80fd\u662f\u4e00\u6bb5\u8bdd\uff09\n print(\"i:\",i,\" ;segment:\",segment)\n ####################################################################################################################\n segment = get_new_segment(segment) # \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d\n ###################################################################################################################\n current_chunk.append(segment)\n current_length += len(segment)\n print(\"#####condition:\",i == len(document) - 1 or current_length >= target_seq_length)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. 
However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances","function_tokens":["def","create_instances_from_document_original","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. 
Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","instances","=","[","]","current_chunk","=","[","]","current_length","=","0","i","=","0","print","(","\"document_index:\"",",","document_index",",","\"document:\"",",","type","(","document",")",",","\" ;document:\"",",","document",")","# document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.","while","i","<","len","(","document",")",":","segment","=","document","[","i","]","# \u53d6\u5230\u4e00\u4e2a\u90e8\u5206\uff08\u53ef\u80fd\u662f\u4e00\u6bb5\u8bdd\uff09","print","(","\"i:\"",",","i",",","\" ;segment:\"",",","segment",")","####################################################################################################################","segment","=","get_new_segment","(","segment",")","# \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d","###################################################################################################################","current_chunk",".","append","(","segment",")","current_length","+=","len","(","segment",")","print","(","\"#####condition:\"",",","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",")","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","if","current_chunk",":","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","tokens_b","=","[","]","# Random next","is_random_next","=","False","if","len","(","current_chunk",")","==","1","or","rng",".","random","(",")","<","0.5",":","is_random_next","=","True","target_b_length","=","target_seq_length","-","len","(","tokens_a",")","# This should rarely go for more than one iteration for large","# corpora. 
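The "random next" branch above retries up to ten times so that the negative B segment is drawn from a different document than the one being processed. A standalone sketch of that sampling loop, using `random.Random` like the `rng` the function receives:

```python
import random

# Sketch of the retry loop above: sample a document index for the
# "random next" negative until it differs from the current document.
def pick_random_document_index(num_documents, document_index, rng, max_tries=10):
    random_document_index = document_index
    for _ in range(max_tries):
        random_document_index = rng.randint(0, num_documents - 1)  # inclusive ends
        if random_document_index != document_index:
            break
    return random_document_index

rng = random.Random(12345)
print(pick_random_document_index(1000, 7, rng))  # an index != 7 (with high probability)
```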
However, just to be careful, we try to make sure that","# the random document is not the same as the document","# we're processing.","for","_","in","range","(","10",")",":","random_document_index","=","rng",".","randint","(","0",",","len","(","all_documents",")","-","1",")","if","random_document_index","!=","document_index",":","break","random_document","=","all_documents","[","random_document_index","]","random_start","=","rng",".","randint","(","0",",","len","(","random_document",")","-","1",")","for","j","in","range","(","random_start",",","len","(","random_document",")",")",":","tokens_b",".","extend","(","random_document","[","j","]",")","if","len","(","tokens_b",")",">=","target_b_length",":","break","# We didn't actually use these segments so we \"put them back\" so","# they don't go to waste.","num_unused_segments","=","len","(","current_chunk",")","-","a_end","i","-=","num_unused_segments","# Actual next","else",":","is_random_next","=","False","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","current_length","=","0","i","+=","1","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L376-L494"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"create_masked_lm_predictions","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original 
word. When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = [t[2:] if len(re.findall('##[\\u4E00-\\u9FA5]', t))>0 else t for t in tokens]\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index][2:] if len(re.findall('##[\\u4E00-\\u9FA5]', tokens[index]))>0 else tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n # tf.logging.info('%s' % (tokens))\n # tf.logging.info('%s' % (output_tokens))\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. When a word has been split into","# WordPieces, the first token does not have any marker and any subsequence","# tokens are prefixed with ##. 
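The candidate-grouping loop above is the heart of whole word masking: any `##`-prefixed continuation piece joins the index set of the word it continues, so a whole word is masked (or kept) as one unit. A standalone sketch, assuming `FLAGS.do_whole_word_mask` is passed in as a plain boolean:

```python
# Sketch of the whole-word candidate grouping above: "##" continuation
# pieces are appended to the previous word's index set.
def build_cand_indexes(tokens, do_whole_word_mask=True):
    cand_indexes = []
    for i, token in enumerate(tokens):
        if token in ("[CLS]", "[SEP]"):
            continue
        if do_whole_word_mask and cand_indexes and token.startswith("##"):
            cand_indexes[-1].append(i)  # continuation of the previous word
        else:
            cand_indexes.append([i])    # start of a new word
    return cand_indexes

print(build_cand_indexes(["[CLS]", "un", "##believ", "##able", "[SEP]"]))
# [[1, 2, 3]] -- the three WordPieces of "unbelievable" form one candidate
```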
So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","output_tokens","=","[","t","[","2",":","]","if","len","(","re",".","findall","(","'##[\\u4E00-\\u9FA5]'",",","t",")",")",">","0","else","t","for","t","in","tokens","]","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","masked_token","=","tokens","[","index","]","[","2",":","]","if","len","(","re",".","findall","(","'##[\\u4E00-\\u9FA5]'",",","tokens","[","index","]",")",")",">","0","else","tokens","[","index","]","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","# tf.logging.info('%s' % (tokens))","# tf.logging.info('%s' % (output_tokens))","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L501-L576"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/create_pretraining_data.py","language":"python","identifier":"truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_num_tokens, rng)","argument_list":"","return_statement":"","docstring":"Truncates a pair of sequences to a maximum sequence length.","docstring_summary":"Truncates a pair of sequences to a maximum sequence 
length.","docstring_tokens":["Truncates","a","pair","of","sequences","to","a","maximum","sequence","length","."],"function":"def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()","function_tokens":["def","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")",":","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_num_tokens",":","break","trunc_tokens","=","tokens_a","if","len","(","tokens_a",")",">","len","(","tokens_b",")","else","tokens_b","assert","len","(","trunc_tokens",")",">=","1","# We want to sometimes truncate from the front and sometimes from the","# back to add more randomness and avoid biases.","if","rng",".","random","(",")","<","0.5",":","del","trunc_tokens","[","0","]","else",":","trunc_tokens",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/create_pretraining_data.py#L579-L594"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_ner.py","language":"python","identifier":"InputExample.__init__","parameters":"(self, guid, text, label=None)","argument_list":"","return_statement":"","docstring":"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.","docstring_summary":"Constructs a InputExample.","docstring_tokens":["Constructs","a","InputExample","."],"function":"def __init__(self, guid, text, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. 
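`truncate_seq_pair` above always trims whichever side is currently longer, randomly from the front or the back, until the pair fits in `max_num_tokens`. A self-contained usage sketch (the function is re-declared here only so the snippet runs on its own):

```python
import random

# Usage sketch of truncate_seq_pair above: both lists are mutated in place.
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc_tokens) >= 1
        if rng.random() < 0.5:
            del trunc_tokens[0]  # truncate from the front
        else:
            trunc_tokens.pop()   # truncate from the back

tokens_a, tokens_b = list("abcdefgh"), list("xyz")
truncate_seq_pair(tokens_a, tokens_b, 6, random.Random(0))
print(len(tokens_a) + len(tokens_b))  # 6
```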
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.label = label","function_tokens":["def","__init__","(","self",",","guid",",","text",",","label","=","None",")",":","self",".","guid","=","guid","self",".","text","=","text","self",".","label","=","label"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_ner.py#L123-L135"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_ner.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_ner.py#L152-L154"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_ner.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_ner.py#L156-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_ner.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_ner.py#L160-L162"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta\/run_ner.py","language":"python","identifier":"DataProcessor._read_data","parameters":"(cls, input_file)","argument_list":"","return_statement":"","docstring":"Reads a BIO data.","docstring_summary":"Reads a BIO 
data.","docstring_tokens":["Reads","a","BIO","data","."],"function":"def _read_data(cls, input_file):\n \"\"\"Reads a BIO data.\"\"\"\n with open(input_file) as f:\n lines = []\n words = []\n labels = []\n for line in f:\n contends = line.strip()\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n if contends.startswith(\"-DOCSTART-\"):\n words.append('')\n continue\n if len(contends) == 0 and words[-1] == '.':\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append([l, w])\n words = []\n labels = []\n continue\n if len(contends) == 0:\n continue\n words.append(word)\n labels.append(label)\n return lines","function_tokens":["def","_read_data","(","cls",",","input_file",")",":","with","open","(","input_file",")","as","f",":","lines","=","[","]","words","=","[","]","labels","=","[","]","for","line","in","f",":","contends","=","line",".","strip","(",")","word","=","line",".","strip","(",")",".","split","(","' '",")","[","0","]","label","=","line",".","strip","(",")",".","split","(","' '",")","[","-","1","]","if","contends",".","startswith","(","\"-DOCSTART-\"",")",":","words",".","append","(","''",")","continue","if","len","(","contends",")","==","0","and","words","[","-","1","]","==","'.'",":","l","=","' '",".","join","(","[","label","for","label","in","labels","if","len","(","label",")",">","0","]",")","w","=","' '",".","join","(","[","word","for","word","in","words","if","len","(","word",")",">","0","]",")","lines",".","append","(","[","l",",","w","]",")","words","=","[","]","labels","=","[","]","continue","if","len","(","contends",")","==","0",":","continue","words",".","append","(","word",")","labels",".","append","(","label",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta\/run_ner.py#L165-L189"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_pretraining.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n 
token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % 
(mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","masked_lm_positions","=","features","[","\"masked_lm_positions\"","]","masked_lm_ids","=","features","[","\"masked_lm_ids\"","]","masked_lm_weights","=","features","[","\"masked_lm_weights\"","]","next_sentence_labels","=","features","[","\"next_sentence_labels\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","(","masked_lm_loss",",","masked_lm_example_loss",",","masked_lm_log_probs",")","=","get_masked_lm_output","(","bert_config",",","model",".","get_sequence_output","(",")",",","model",".","get_embedding_table","(",")",",","masked_lm_positions",",","masked_lm_ids",",","masked_lm_weights",")","(","next_sentence_loss",",","next_sentence_example_loss",",","next_sentence_log_probs",")","=","get_next_sentence_output","(","bert_config",",","model",".","get_pooled_output","(",")",",","next_sentence_labels",")","total_loss","=","masked_lm_loss","+","next_sentence_loss","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels",")",":","\"\"\"Computes the loss and accuracy of the model.\"\"\"","masked_lm_log_probs","=","tf",".","reshape","(","masked_lm_log_probs",",","[","-","1",",","masked_lm_log_probs",".","shape","[","-","1","]","]",")","masked_lm_predictions","=","tf",".","argmax","(","masked_lm_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","masked_lm_example_loss","=","tf",".","reshape","(","masked_lm_example_loss",",","[","-","1","]",")","masked_lm_ids","=","tf",".","reshape","(","masked_lm_ids",",","[","-","1","]",")","masked_lm_weights","=","tf",".","reshape","(","masked_lm_weights",",","[","-","1","]",")","masked_lm_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","masked_lm_ids",",","predictions","=","masked_lm_predictions",",","weights","=","masked_lm_weights",")","masked_lm_mean_loss","=","tf",".","metrics",".","mean","(","values","=","masked_lm_example_loss",",","weights","=","masked_lm_weights",")","next_sentence_log_probs","=","tf",".","reshape","(","next_sentence_log_probs",",","[","-","1",",","next_sentence_log_probs",".","shape","[","-","1","]","]",")","next_sentence_predictions","=","tf",".","argmax","(","next_sentence_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","next_sentence_labels","=","tf",".","reshape","(","next_sentence_labels",",","[","-","1","]",")","next_sentence_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","next_sentence_labels",",","predictions","=","next_sentence_predictions",")","next_sentence_mean_loss","=","tf",".","metrics",".","mean","(","values","=","next_sentence_example_loss",")","return","{","\"masked_lm_accuracy\"",":","masked_lm_accuracy",",","\"masked_lm_loss\"",":","masked_lm_mean_loss",",","\"next_sentence_accuracy\"",":","next_sentence_accuracy",",","\"next_sentence_loss\"",":","next_sentence_mean_loss",",","}","eval_metrics","=","(","metric_fn",",","[","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and EVAL modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_pretraining.py#L109-L237"} 
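`model_fn_builder` above sums the masked-LM and next-sentence losses and, in EVAL mode, computes weighted accuracies with `tf.metrics`. A NumPy sketch of the weighted masked-LM accuracy from `metric_fn`, where padding predictions carry weight 0.0 (a standalone re-implementation, not the TF graph code):

```python
import numpy as np

# Weighted masked-LM accuracy, mirroring metric_fn above: positions with
# weight 0.0 (zero-padded predictions) do not count toward the metric.
def masked_lm_accuracy(log_probs, label_ids, weights):
    predictions = np.argmax(log_probs, axis=-1)
    correct = (predictions == label_ids).astype(np.float32)
    return float(np.sum(correct * weights) / np.sum(weights))

log_probs = np.log(np.array([[0.7, 0.2, 0.1],
                             [0.1, 0.8, 0.1],
                             [0.3, 0.3, 0.4]]))
print(masked_lm_accuracy(log_probs, np.array([0, 1, 0]), np.array([1.0, 1.0, 0.0])))
# 1.0 -- the third (padding) position is ignored
```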
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_pretraining.py","language":"python","identifier":"get_masked_lm_output","parameters":"(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights)","argument_list":"","return_statement":"return (loss, per_example_loss, log_probs)","docstring":"Get loss and log probs for the masked LM.","docstring_summary":"Get loss and log probs for the masked LM.","docstring_tokens":["Get","loss","and","log","probs","for","the","masked","LM","."],"function":"def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls\/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). 
The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator \/ denominator\n\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_masked_lm_output","(","bert_config",",","input_tensor",",","output_weights",",","positions",",","label_ids",",","label_weights",")",":","input_tensor","=","gather_indexes","(","input_tensor",",","positions",")","with","tf",".","variable_scope","(","\"cls\/predictions\"",")",":","# We apply one more non-linear transformation before the output layer.","# This matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"transform\"",")",":","input_tensor","=","tf",".","layers",".","dense","(","input_tensor",",","units","=","bert_config",".","hidden_size",",","activation","=","modeling",".","get_activation","(","bert_config",".","hidden_act",")",",","kernel_initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","input_tensor","=","modeling",".","layer_norm","(","input_tensor",")","# The output weights are the same as the input embeddings, but there is","# an output-only bias for each token.","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","bert_config",".","vocab_size","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","label_ids","=","tf",".","reshape","(","label_ids",",","[","-","1","]",")","label_weights","=","tf",".","reshape","(","label_weights",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","label_ids",",","depth","=","bert_config",".","vocab_size",",","dtype","=","tf",".","float32",")","# The `positions` tensor might be zero-padded (if the sequence is too","# short to have the maximum number of predictions). 
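The weighted loss at the end of `get_masked_lm_output` averages per-position cross-entropy over the real predictions only; the `1e-5` keeps the denominator non-zero when every prediction slot is padding. A NumPy sketch of that arithmetic:

```python
import numpy as np

# NumPy sketch of the weighted masked-LM loss above: one-hot cross-entropy
# per position, summed with label_weights and normalized by their total.
def masked_lm_loss(log_probs, label_ids, label_weights, vocab_size):
    one_hot = np.eye(vocab_size)[label_ids]
    per_example_loss = -np.sum(log_probs * one_hot, axis=-1)
    numerator = np.sum(label_weights * per_example_loss)
    denominator = np.sum(label_weights) + 1e-5  # guard against all-padding
    return numerator / denominator

log_probs = np.log(np.full((2, 4), 0.25))  # uniform predictions over 4 tokens
print(masked_lm_loss(log_probs, np.array([1, 3]), np.array([1.0, 0.0]), 4))
# ~1.386 == ln(4), contributed by the single real (weight 1.0) prediction
```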
The `label_weights`","# tensor has a value of 1.0 for every real prediction and 0.0 for the","# padding predictions.","per_example_loss","=","-","tf",".","reduce_sum","(","log_probs","*","one_hot_labels",",","axis","=","[","-","1","]",")","numerator","=","tf",".","reduce_sum","(","label_weights","*","per_example_loss",")","denominator","=","tf",".","reduce_sum","(","label_weights",")","+","1e-5","loss","=","numerator","\/","denominator","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_pretraining.py#L240-L282"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_pretraining.py","language":"python","identifier":"get_next_sentence_output","parameters":"(bert_config, input_tensor, labels)","argument_list":"","return_statement":"","docstring":"Get loss and log probs for the next sentence prediction.","docstring_summary":"Get loss and log probs for the next sentence prediction.","docstring_tokens":["Get","loss","and","log","probs","for","the","next","sentence","prediction","."],"function":"def get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls\/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_next_sentence_output","(","bert_config",",","input_tensor",",","labels",")",":","# Simple binary classification. Note that 0 is \"next sentence\" and 1 is","# \"random sentence\". 
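`get_next_sentence_output` above is a plain two-way classifier over the pooled `[CLS]` vector, with label 0 meaning "actual next sentence" and 1 meaning "random sentence". A NumPy sketch of its log-softmax cross-entropy:

```python
import numpy as np

# Sketch of the next-sentence loss above: 2-way log-softmax cross-entropy,
# returning both the mean loss and the per-example losses.
def next_sentence_loss(logits, labels):
    log_probs = logits - np.log(np.sum(np.exp(logits), axis=-1, keepdims=True))
    one_hot = np.eye(2)[labels]
    per_example_loss = -np.sum(one_hot * log_probs, axis=-1)
    return per_example_loss.mean(), per_example_loss

logits = np.array([[2.0, -1.0], [0.5, 0.5]])
loss, per_example = next_sentence_loss(logits, np.array([0, 1]))
print(round(loss, 4))  # ~0.3709, the mean of the two cross-entropies
```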
This weight matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"cls\/seq_relationship\"",")",":","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","shape","=","[","2",",","bert_config",".","hidden_size","]",",","initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","labels","=","tf",".","reshape","(","labels",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","2",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_pretraining.py#L285-L305"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_pretraining.py","language":"python","identifier":"gather_indexes","parameters":"(sequence_tensor, positions)","argument_list":"","return_statement":"return output_tensor","docstring":"Gathers the vectors at the specific positions over a minibatch.","docstring_summary":"Gathers the vectors at the specific positions over a minibatch.","docstring_tokens":["Gathers","the","vectors","at","the","specific","positions","over","a","minibatch","."],"function":"def gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return 
output_tensor","function_tokens":["def","gather_indexes","(","sequence_tensor",",","positions",")",":","sequence_shape","=","modeling",".","get_shape_list","(","sequence_tensor",",","expected_rank","=","3",")","batch_size","=","sequence_shape","[","0","]","seq_length","=","sequence_shape","[","1","]","width","=","sequence_shape","[","2","]","flat_offsets","=","tf",".","reshape","(","tf",".","range","(","0",",","batch_size",",","dtype","=","tf",".","int32",")","*","seq_length",",","[","-","1",",","1","]",")","flat_positions","=","tf",".","reshape","(","positions","+","flat_offsets",",","[","-","1","]",")","flat_sequence_tensor","=","tf",".","reshape","(","sequence_tensor",",","[","batch_size","*","seq_length",",","width","]",")","output_tensor","=","tf",".","gather","(","flat_sequence_tensor",",","flat_positions",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_pretraining.py#L308-L321"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_pretraining.py","language":"python","identifier":"input_fn_builder","parameters":"(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
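`gather_indexes` above pulls the hidden vectors at the masked positions by flattening the batch: per-example positions become indices into a `[batch * seq_len, width]` tensor after adding `batch_index * seq_length` as an offset. A NumPy sketch of the same trick:

```python
import numpy as np

# NumPy sketch of the flat-offset gather above.
def gather_indexes(sequence_tensor, positions):
    batch_size, seq_length, width = sequence_tensor.shape
    flat_offsets = (np.arange(batch_size) * seq_length).reshape(-1, 1)
    flat_positions = (positions + flat_offsets).reshape(-1)
    flat_sequence = sequence_tensor.reshape(batch_size * seq_length, width)
    return flat_sequence[flat_positions]

seq = np.arange(2 * 4 * 3).reshape(2, 4, 3)        # [batch=2, seq=4, width=3]
print(gather_indexes(seq, np.array([[1], [2]])))   # row 1 of ex. 0, row 2 of ex. 1
```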
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_files",",","max_seq_length",",","max_predictions_per_seq",",","is_training",",","num_cpu_threads","=","4",")",":","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"masked_lm_positions\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_ids\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_weights\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","float32",")",",","\"next_sentence_labels\"",":","tf",".","FixedLenFeature","(","[","1","]",",","tf",".","int64",")",",","}","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","if","is_training",":","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","tf",".","constant","(","input_files",")",")","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","len","(","input_files",")",")","# `cycle_length` is the number of parallel files that get read.","cycle_length","=","min","(","num_cpu_threads",",","len","(","input_files",")",")","# `sloppy` mode means that the interleaving is not exact. This adds","# even more randomness to the training pipeline.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","parallel_interleave","(","tf",".","data",".","TFRecordDataset",",","sloppy","=","is_training",",","cycle_length","=","cycle_length",")",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","else",":","d","=","tf",".","data",".","TFRecordDataset","(","input_files",")","# Since we evaluate for a fixed number of steps we don't want to encounter","# out-of-range exceptions.","d","=","d",".","repeat","(",")","# We must `drop_remainder` on training because the TPU requires fixed","# size dimensions. 
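The comment above describes the batching policy: TPU training needs fixed shapes, so the final short batch is dropped, while the intent for eval is to keep every sample. A pure-Python sketch of what `drop_remainder` means (the real pipeline uses `tf.contrib.data.map_and_batch`; this is only an illustration of the semantics):

```python
# Pure-Python sketch of drop_remainder semantics in the input pipeline above.
def batch(examples, batch_size, drop_remainder):
    batches = [examples[i:i + batch_size]
               for i in range(0, len(examples), batch_size)]
    if drop_remainder and batches and len(batches[-1]) < batch_size:
        batches.pop()  # TPUs require fixed-size batches, so drop the tail
    return batches

data = list(range(10))
print(len(batch(data, 4, drop_remainder=True)))   # 2 -- the 2-element tail is dropped
print(len(batch(data, 4, drop_remainder=False)))  # 3 -- the remainder is kept
```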
For eval, we assume we are evaluating on the CPU or GPU","# and we *don't* want to drop the remainder, otherwise we wont cover","# every sample.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","num_parallel_batches","=","num_cpu_threads",",","drop_remainder","=","True",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_pretraining.py#L324-L388"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_pretraining.py","language":"python","identifier":"_decode_record","parameters":"(record, name_to_features)","argument_list":"","return_statement":"return example","docstring":"Decodes a record to a TensorFlow example.","docstring_summary":"Decodes a record to a TensorFlow example.","docstring_tokens":["Decodes","a","record","to","a","TensorFlow","example","."],"function":"def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example","function_tokens":["def","_decode_record","(","record",",","name_to_features",")",":","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_pretraining.py#L391-L403"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"read_squad_examples","parameters":"(input_file, is_training)","argument_list":"","return_statement":"return examples","docstring":"Read a SQuAD json file into a list of SquadExample.","docstring_summary":"Read a SQuAD json file into a list of SquadExample.","docstring_tokens":["Read","a","SQuAD","json","file","into","a","list","of","SquadExample","."],"function":"def read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) 
- 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples","function_tokens":["def","read_squad_examples","(","input_file",",","is_training",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","reader",":","input_data","=","json",".","load","(","reader",")","[","\"data\"","]","def","is_whitespace","(","c",")",":","if","c","==","\" \"","or","c","==","\"\\t\"","or","c","==","\"\\r\"","or","c","==","\"\\n\"","or","ord","(","c",")","==","0x202F",":","return","True","return","False","examples","=","[","]","for","entry","in","input_data",":","for","paragraph","in","entry","[","\"paragraphs\"","]",":","paragraph_text","=","paragraph","[","\"context\"","]","doc_tokens","=","[","]","char_to_word_offset","=","[","]","prev_is_whitespace","=","True","for","c","in","paragraph_text",":","if","is_whitespace","(","c",")",":","prev_is_whitespace","=","True","else",":","if","prev_is_whitespace",":","doc_tokens",".","append","(","c",")","else",":","doc_tokens","[","-","1","]","+=","c","prev_is_whitespace","=","False","char_to_word_offset",".","append","(","len","(","doc_tokens",")","-","1",")","for","qa","in","paragraph","[","\"qas\"","]",":","qas_id","=","qa","[","\"id\"","]","question_text","=","qa","[","\"question\"","]","start_position","=","None","end_position","=","None","orig_answer_text","=","None","is_impossible","=","False","if","is_training",":","if","FLAGS",".","version_2_with_negative",":","is_impossible","=","qa","[","\"is_impossible\"","]","if","(","len","(","qa","[","\"answers\"","]",")","!=","1",")","and","(","not","is_impossible",")",":","raise","ValueError","(","\"For training, each question should have exactly 1 
answer.\"",")","if","not","is_impossible",":","answer","=","qa","[","\"answers\"","]","[","0","]","orig_answer_text","=","answer","[","\"text\"","]","answer_offset","=","answer","[","\"answer_start\"","]","answer_length","=","len","(","orig_answer_text",")","start_position","=","char_to_word_offset","[","answer_offset","]","end_position","=","char_to_word_offset","[","answer_offset","+","answer_length","-","1","]","# Only add answers where the text can be exactly recovered from the","# document. If this CAN'T happen it's likely due to weird Unicode","# stuff so we will just skip the example.","#","# Note that this means for training mode, every example is NOT","# guaranteed to be preserved.","actual_text","=","\" \"",".","join","(","doc_tokens","[","start_position",":","(","end_position","+","1",")","]",")","cleaned_answer_text","=","\" \"",".","join","(","tokenization",".","whitespace_tokenize","(","orig_answer_text",")",")","if","actual_text",".","find","(","cleaned_answer_text",")","==","-","1",":","tf",".","logging",".","warning","(","\"Could not find answer: '%s' vs. '%s'\"",",","actual_text",",","cleaned_answer_text",")","continue","else",":","start_position","=","-","1","end_position","=","-","1","orig_answer_text","=","\"\"","example","=","SquadExample","(","qas_id","=","qas_id",",","question_text","=","question_text",",","doc_tokens","=","doc_tokens",",","orig_answer_text","=","orig_answer_text",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","is_impossible",")","examples",".","append","(","example",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L227-L306"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn)","argument_list":"","return_statement":"","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n 
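The heart of `read_squad_examples` is the `char_to_word_offset` table: it maps every character of the context to the index of the whitespace-delimited token containing it, which is what converts a character-based `answer_start` into word-level start/end positions. A self-contained sketch of just that bookkeeping, using the same whitespace test as the record above:

```python
def build_char_to_word_offset(paragraph_text):
    doc_tokens = []
    char_to_word_offset = []
    prev_is_whitespace = True
    for c in paragraph_text:
        if c in (" ", "\t", "\r", "\n") or ord(c) == 0x202F:
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)   # start a new token
            else:
                doc_tokens[-1] += c    # extend the current token
            prev_is_whitespace = False
        # Every character (including whitespace) points at the token
        # most recently started.
        char_to_word_offset.append(len(doc_tokens) - 1)
    return doc_tokens, char_to_word_offset

# e.g. "the cat" -> tokens ["the", "cat"], offsets [0, 0, 0, 0, 1, 1, 1]
```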
(tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
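The sliding-window chunking in `convert_examples_to_features` is easiest to see in isolation. This sketch reproduces the `_DocSpan` loop from the record: documents longer than `max_tokens_for_doc` are split into overlapping spans, each window advancing by `doc_stride` tokens:

```python
import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_doc_tokens, max_tokens_for_doc, doc_stride):
    doc_spans = []
    start_offset = 0
    while start_offset < num_doc_tokens:
        # Each span is at most max_tokens_for_doc long.
        length = min(num_doc_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == num_doc_tokens:
            break  # the last span reached the end of the document
        start_offset += min(length, doc_stride)
    return doc_spans

# e.g. 10 tokens, window 4, stride 2 ->
# [DocSpan(0, 4), DocSpan(2, 4), DocSpan(4, 4), DocSpan(6, 4)]
```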
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 
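The padding step inside the span loop is another small, reusable pattern: `input_mask` marks real tokens (1) versus padding (0), and all three parallel lists are zero-padded to the fixed `max_seq_length` the model expects. A minimal sketch, with the assertions kept as in the record:

```python
def pad_to_max_seq_length(input_ids, segment_ids, max_seq_length):
    # 1 for real tokens, 0 for padding; only real tokens are attended to.
    input_mask = [1] * len(input_ids)
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    return input_ids, input_mask, segment_ids
```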
1","function_tokens":["def","convert_examples_to_features","(","examples",",","tokenizer",",","max_seq_length",",","doc_stride",",","max_query_length",",","is_training",",","output_fn",")",":","unique_id","=","1000000000","for","(","example_index",",","example",")","in","enumerate","(","examples",")",":","query_tokens","=","tokenizer",".","tokenize","(","example",".","question_text",")","if","len","(","query_tokens",")",">","max_query_length",":","query_tokens","=","query_tokens","[","0",":","max_query_length","]","tok_to_orig_index","=","[","]","orig_to_tok_index","=","[","]","all_doc_tokens","=","[","]","for","(","i",",","token",")","in","enumerate","(","example",".","doc_tokens",")",":","orig_to_tok_index",".","append","(","len","(","all_doc_tokens",")",")","sub_tokens","=","tokenizer",".","tokenize","(","token",")","for","sub_token","in","sub_tokens",":","tok_to_orig_index",".","append","(","i",")","all_doc_tokens",".","append","(","sub_token",")","tok_start_position","=","None","tok_end_position","=","None","if","is_training","and","example",".","is_impossible",":","tok_start_position","=","-","1","tok_end_position","=","-","1","if","is_training","and","not","example",".","is_impossible",":","tok_start_position","=","orig_to_tok_index","[","example",".","start_position","]","if","example",".","end_position","<","len","(","example",".","doc_tokens",")","-","1",":","tok_end_position","=","orig_to_tok_index","[","example",".","end_position","+","1","]","-","1","else",":","tok_end_position","=","len","(","all_doc_tokens",")","-","1","(","tok_start_position",",","tok_end_position",")","=","_improve_answer_span","(","all_doc_tokens",",","tok_start_position",",","tok_end_position",",","tokenizer",",","example",".","orig_answer_text",")","# The -3 accounts for [CLS], [SEP] and [SEP]","max_tokens_for_doc","=","max_seq_length","-","len","(","query_tokens",")","-","3","# We can have documents that are longer than the maximum sequence length.","# To deal with this we do a sliding window approach, where we take chunks","# of the up to our max length with a stride of `doc_stride`.","_DocSpan","=","collections",".","namedtuple","(","# pylint: 
disable=invalid-name","\"DocSpan\"",",","[","\"start\"",",","\"length\"","]",")","doc_spans","=","[","]","start_offset","=","0","while","start_offset","<","len","(","all_doc_tokens",")",":","length","=","len","(","all_doc_tokens",")","-","start_offset","if","length",">","max_tokens_for_doc",":","length","=","max_tokens_for_doc","doc_spans",".","append","(","_DocSpan","(","start","=","start_offset",",","length","=","length",")",")","if","start_offset","+","length","==","len","(","all_doc_tokens",")",":","break","start_offset","+=","min","(","length",",","doc_stride",")","for","(","doc_span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","tokens","=","[","]","token_to_orig_map","=","{","}","token_is_max_context","=","{","}","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","query_tokens",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","i","in","range","(","doc_span",".","length",")",":","split_token_index","=","doc_span",".","start","+","i","token_to_orig_map","[","len","(","tokens",")","]","=","tok_to_orig_index","[","split_token_index","]","is_max_context","=","_check_is_max_context","(","doc_spans",",","doc_span_index",",","split_token_index",")","token_is_max_context","[","len","(","tokens",")","]","=","is_max_context","tokens",".","append","(","all_doc_tokens","[","split_token_index","]",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","start_position","=","None","end_position","=","None","if","is_training","and","not","example",".","is_impossible",":","# For training, if our document chunk does not contain an annotation","# we throw it out, since there is nothing to predict.","doc_start","=","doc_span",".","start","doc_end","=","doc_span",".","start","+","doc_span",".","length","-","1","out_of_span","=","False","if","not","(","tok_start_position",">=","doc_start","and","tok_end_position","<=","doc_end",")",":","out_of_span","=","True","if","out_of_span",":","start_position","=","0","end_position","=","0","else",":","doc_offset","=","len","(","query_tokens",")","+","2","start_position","=","tok_start_position","-","doc_start","+","doc_offset","end_position","=","tok_end_position","-","doc_start","+","doc_offset","if","is_training","and","example",".","is_impossible",":","start_position","=","0","end_position","=","0","if","example_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","unique_id",")",")","tf",".","logging",".","info","(","\"example_index: %s\"","%","(","example_index",")",")","tf",".","logging",".","info","(","\"doc_span_index: %s\"","%","(","doc_span_index",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"token_to_orig_map: %s\"","%","\" \"",".","join","(","[","\"%d:%d\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_to_orig_map",")","]",")",")","tf",".","logging",".","info","(","\"token_is_max_context: %s\"","%","\" \"",".","join","(","[","\"%d:%s\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_is_max_context",")","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","if","is_training","and","example",".","is_impossible",":","tf",".","logging",".","info","(","\"impossible example\"",")","if","is_training","and","not","example",".","is_impossible",":","answer_text","=","\" \"",".","join","(","tokens","[","start_position",":","(","end_position","+","1",")","]",")","tf",".","logging",".","info","(","\"start_position: %d\"","%","(","start_position",")",")","tf",".","logging",".","info","(","\"end_position: %d\"","%","(","end_position",")",")","tf",".","logging",".","info","(","\"answer: 
%s\"","%","(","tokenization",".","printable_text","(","answer_text",")",")",")","feature","=","InputFeatures","(","unique_id","=","unique_id",",","example_index","=","example_index",",","doc_span_index","=","doc_span_index",",","tokens","=","tokens",",","token_to_orig_map","=","token_to_orig_map",",","token_is_max_context","=","token_is_max_context",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","example",".","is_impossible",")","# Run callback","output_fn","(","feature",")","unique_id","+=","1"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L309-L473"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"_improve_answer_span","parameters":"(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text)","argument_list":"","return_statement":"return (input_start, input_end)","docstring":"Returns tokenized answer spans that better match the annotated answer.","docstring_summary":"Returns tokenized answer spans that better match the annotated answer.","docstring_tokens":["Returns","tokenized","answer","spans","that","better","match","the","annotated","answer","."],"function":"def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)","function_tokens":["def","_improve_answer_span","(","doc_tokens",",","input_start",",","input_end",",","tokenizer",",","orig_answer_text",")",":","# The SQuAD annotations are character based. We first project them to","# whitespace-tokenized words. But then after WordPiece tokenization, we can","# often find a \"better match\". For example:","#","# Question: What year was John Smith born?","# Context: The leader was John Smith (1895-1943).","# Answer: 1895","#","# The original whitespace-tokenized answer will be \"(1895-1943).\". 
However","# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match","# the exact answer, 1895.","#","# However, this is not always possible. Consider the following:","#","# Question: What country is the top exporter of electornics?","# Context: The Japanese electronics industry is the lagest in the world.","# Answer: Japan","#","# In this case, the annotator chose \"Japan\" as a character sub-span of","# the word \"Japanese\". Since our WordPiece tokenizer does not split","# \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare","# in SQuAD, but does happen.","tok_answer_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_answer_text",")",")","for","new_start","in","range","(","input_start",",","input_end","+","1",")",":","for","new_end","in","range","(","input_end",",","new_start","-","1",",","-","1",")",":","text_span","=","\" \"",".","join","(","doc_tokens","[","new_start",":","(","new_end","+","1",")","]",")","if","text_span","==","tok_answer_text",":","return","(","new_start",",","new_end",")","return","(","input_start",",","input_end",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L476-L510"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. 
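Stripped of the tokenizer dependency, `_improve_answer_span` is an exhaustive sub-span search: it tries every span inside `[input_start, input_end]` and returns the first one whose space-joined tokens exactly equal the tokenized annotated answer, falling back to the original span otherwise. The caller is assumed to pass `tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))`, as in the record:

```python
def improve_answer_span(doc_tokens, input_start, input_end, tok_answer_text):
    for new_start in range(input_start, input_end + 1):
        for new_end in range(input_end, new_start - 1, -1):
            text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
            if text_span == tok_answer_text:
                return (new_start, new_end)
    return (input_start, input_end)

# e.g. doc_tokens = ["(", "1895", "-", "1943", ")", "."] with answer
# "1895" tightens the original span (0, 5) to (1, 1).
```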
E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L513-L547"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return (start_logits, end_logits)","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls\/squad\/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls\/squad\/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, 
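The "maximum context" rule in `_check_is_max_context` also reduces to a few lines once the spans are plain data. In this sketch a token's score within a span is the minimum of its left and right context plus a small bonus for longer spans, and the token is attributed to the span where that score is highest; `doc_spans` is assumed to be a list of `(start, length)` pairs:

```python
def max_context_span_index(doc_spans, position):
    best_score, best_span_index = None, None
    for span_index, (start, length) in enumerate(doc_spans):
        end = start + length - 1
        if position < start or position > end:
            continue  # token not inside this span
        num_left = position - start
        num_right = end - position
        # Minimum of left/right context, plus a tie-break favoring
        # longer spans.
        score = min(num_left, num_right) + 0.01 * length
        if best_score is None or score > best_score:
            best_score, best_span_index = score, span_index
    return best_span_index
```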
end_logits)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","final_hidden","=","model",".","get_sequence_output","(",")","final_hidden_shape","=","modeling",".","get_shape_list","(","final_hidden",",","expected_rank","=","3",")","batch_size","=","final_hidden_shape","[","0","]","seq_length","=","final_hidden_shape","[","1","]","hidden_size","=","final_hidden_shape","[","2","]","output_weights","=","tf",".","get_variable","(","\"cls\/squad\/output_weights\"",",","[","2",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"cls\/squad\/output_bias\"",",","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","final_hidden_matrix","=","tf",".","reshape","(","final_hidden",",","[","batch_size","*","seq_length",",","hidden_size","]",")","logits","=","tf",".","matmul","(","final_hidden_matrix",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","logits","=","tf",".","reshape","(","logits",",","[","batch_size",",","seq_length",",","2","]",")","logits","=","tf",".","transpose","(","logits",",","[","2",",","0",",","1","]",")","unstacked_logits","=","tf",".","unstack","(","logits",",","axis","=","0",")","(","start_logits",",","end_logits",")","=","(","unstacked_logits","[","0","]",",","unstacked_logits","[","1","]",")","return","(","start_logits",",","end_logits",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L550-L587"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n 
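The span head in `create_model` is a single `[2, hidden_size]` projection over the sequence output, reshaped and unstacked into per-position start and end logits. A hedged TF 1.x sketch of that head alone; `final_hidden` stands in for `model.get_sequence_output()`, and `hidden_size` is assumed known statically:

```python
import tensorflow as tf

def span_logits(final_hidden, hidden_size):
    shape = tf.shape(final_hidden)            # [batch, seq_len, hidden]
    batch_size, seq_length = shape[0], shape[1]
    output_weights = tf.get_variable(
        "cls/squad/output_weights", [2, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.get_variable(
        "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
    # Flatten to [batch * seq_len, hidden], project to 2 logits per
    # position, then separate the start and end channels.
    matrix = tf.reshape(final_hidden, [batch_size * seq_length, hidden_size])
    logits = tf.matmul(matrix, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    logits = tf.transpose(
        tf.reshape(logits, [batch_size, seq_length, 2]), [2, 0, 1])
    start_logits, end_logits = tf.unstack(logits, num=2, axis=0)
    return start_logits, end_logits
```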
use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) \/ 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
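The training loss in `model_fn` is a per-position log-softmax cross-entropy against one-hot start/end positions, with the two losses averaged. A compact sketch under the same TF 1.x assumption; `seq_length` is assumed to be a Python int:

```python
import tensorflow as tf

def squad_loss(start_logits, end_logits, start_positions, end_positions,
               seq_length):
    def compute_loss(logits, positions):
        one_hot = tf.one_hot(positions, depth=seq_length, dtype=tf.float32)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        # Negative log-likelihood of the gold position, averaged over
        # the batch.
        return -tf.reduce_mean(tf.reduce_sum(one_hot * log_probs, axis=-1))

    start_loss = compute_loss(start_logits, start_positions)
    end_loss = compute_loss(end_logits, end_positions)
    return (start_loss + end_loss) / 2.0
```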
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","start_logits",",","end_logits",")","=","create_model","(","bert_config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","seq_length","=","modeling",".","get_shape_list","(","input_ids",")","[","1","]","def","compute_loss","(","logits",",","positions",")",":","one_hot_positions","=","tf",".","one_hot","(","positions",",","depth","=","seq_length",",","dtype","=","tf",".","float32",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","loss","=","-","tf",".","reduce_mean","(","tf",".","reduce_sum","(","one_hot_positions","*","log_probs",",","axis","=","-","1",")",")","return","loss","start_positions","=","features","[","\"start_positions\"","]","end_positions","=","features","[","\"end_positions\"","]","start_loss","=","compute_loss","(","start_logits",",","start_positions",")","end_loss","=","compute_loss","(","end_logits",",","end_positions",")","total_loss","=","(","start_loss","+","end_loss",")","\/","2.0","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","predictions","=","{","\"unique_ids\"",":","unique_ids",",","\"start_logits\"",":","start_logits",",","\"end_logits\"",":","end_logits",",","}","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and PREDICT modes are supported: 
%s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L590-L684"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"input_fn_builder","parameters":"(input_file, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"unique_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","}","if","is_training",":","name_to_features","[","\"start_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","name_to_features","[","\"end_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to 
int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L687-L734"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"write_predictions","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = 
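The `input_fn_builder` record above follows the standard TPUEstimator pattern: training repeats and shuffles the TFRecord stream, evaluation reads it once, and `drop_remainder` is enabled only when a fixed batch shape is required. A condensed sketch, assuming TF 1.x with `tf.contrib.data.map_and_batch` available and a `decode_fn` like the `_decode_record` shown earlier:

```python
import tensorflow as tf

def make_input_fn(input_file, name_to_features, is_training,
                  drop_remainder, decode_fn):
    def input_fn(params):
        batch_size = params["batch_size"]  # injected by TPUEstimator
        d = tf.data.TFRecordDataset(input_file)
        if is_training:
            # Lots of parallel reading and shuffling for training; for
            # eval, order does not matter and the data is read once.
            d = d.repeat()
            d = d.shuffle(buffer_size=100)
        d = d.apply(
            tf.contrib.data.map_and_batch(
                lambda record: decode_fn(record, name_to_features),
                batch_size=batch_size,
                drop_remainder=drop_remainder))
        return d
    return input_fn
```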
_get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")","function_tokens":["def","write_predictions","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",",","output_null_log_odds_file",")",":","tf",".","logging",".","info","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","tf",".","logging",".","info","(","\"Writing nbest to: %s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature",".","example_index","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","all_examples",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","score_null","=","1000000","# large and positive","min_null_feature_index","=","0","# the paragraph slice with min mull score","null_start_logit","=","0","# the start logit at the slice with min null score","null_end_logit","=","0","# the end logit at the slice with min null 
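For SQuAD 2.0, the final answer/no-answer decision in `write_predictions` hinges on one comparison: predict the empty string iff the null span's score exceeds the best non-null span's score by more than `null_score_diff_threshold`. A small sketch of that decision, with a namedtuple standing in for the `_NbestPrediction` entries used in the record:

```python
import collections

NbestPrediction = collections.namedtuple(
    "NbestPrediction", ["text", "start_logit", "end_logit"])

def pick_answer(score_null, best_non_null, threshold):
    # score_diff > threshold means the model abstains (predicts "").
    score_diff = (score_null
                  - best_non_null.start_logit
                  - best_non_null.end_logit)
    if score_diff > threshold:
        return "", score_diff
    return best_non_null.text, score_diff

# e.g. pick_answer(1.0, NbestPrediction("Steve Smith", 3.0, 2.5), 0.0)
# -> ("Steve Smith", -4.5)
```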
score","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature",".","unique_id","]","start_indexes","=","_get_best_indexes","(","result",".","start_logits",",","n_best_size",")","end_indexes","=","_get_best_indexes","(","result",".","end_logits",",","n_best_size",")","# if we could have irrelevant answers, get the min score of irrelevant","if","FLAGS",".","version_2_with_negative",":","feature_null_score","=","result",".","start_logits","[","0","]","+","result",".","end_logits","[","0","]","if","feature_null_score","<","score_null",":","score_null","=","feature_null_score","min_null_feature_index","=","feature_index","null_start_logit","=","result",".","start_logits","[","0","]","null_end_logit","=","result",".","end_logits","[","0","]","for","start_index","in","start_indexes",":","for","end_index","in","end_indexes",":","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature",".","tokens",")",":","continue","if","end_index",">=","len","(","feature",".","tokens",")",":","continue","if","start_index","not","in","feature",".","token_to_orig_map",":","continue","if","end_index","not","in","feature",".","token_to_orig_map",":","continue","if","not","feature",".","token_is_max_context",".","get","(","start_index",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","result",".","start_logits","[","start_index","]",",","end_logit","=","result",".","end_logits","[","end_index","]",")",")","if","FLAGS",".","version_2_with_negative",":","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","min_null_feature_index",",","start_index","=","0",",","end_index","=","0",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"","]",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature",".","tokens","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature",".","token_to_orig_map","[","pred",".","start_index","]","orig_doc_end","=","feature",".","token_to_orig_map","[","pred",".","end_index","]","orig_tokens","=","example",".","doc_tokens","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\" \"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean 
whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\" \"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",")",")","# if we didn't inlude the empty option in the n-best, inlcude it","if","FLAGS",".","version_2_with_negative",":","if","\"\"","not","in","seen_predictions",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","# In very rare edge cases we could have no valid predictions. So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","assert","len","(","nbest",")",">=","1","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","if","not","best_non_null_entry",":","if","entry",".","text",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","probs","[","i","]","output","[","\"start_logit\"","]","=","entry",".","start_logit","output","[","\"end_logit\"","]","=","entry",".","end_logit","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","if","not","FLAGS",".","version_2_with_negative",":","all_predictions","[","example",".","qas_id","]","=","nbest_json","[","0","]","[","\"text\"","]","else",":","# predict \"\" iff the null score - the score of best non-null > 
threshold","score_diff","=","score_null","-","best_non_null_entry",".","start_logit","-","(","best_non_null_entry",".","end_logit",")","scores_diff_json","[","example",".","qas_id","]","=","score_diff","if","score_diff",">","FLAGS",".","null_score_diff_threshold",":","all_predictions","[","example",".","qas_id","]","=","\"\"","else",":","all_predictions","[","example",".","qas_id","]","=","best_non_null_entry",".","text","all_nbest_json","[","example",".","qas_id","]","=","nbest_json","with","tf",".","gfile",".","GFile","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",")","+","\"\\n\"",")","with","tf",".","gfile",".","GFile","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",")","+","\"\\n\"",")","if","FLAGS",".","version_2_with_negative",":","with","tf",".","gfile",".","GFile","(","output_null_log_odds_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","scores_diff_json",",","indent","=","4",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L741-L924"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"get_final_text","parameters":"(pred_text, orig_text, do_lower_case)","argument_list":"","return_statement":"return output_text","docstring":"Project the tokenized prediction back to the original text.","docstring_summary":"Project the tokenized prediction back to the original text.","docstring_tokens":["Project","the","tokenized","prediction","back","to","the","original","text","."],"function":"def get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping\/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text","function_tokens":["def","get_final_text","(","pred_text",",","orig_text",",","do_lower_case",")",":","# When we created the data, we kept track of the alignment between original","# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So","# now `orig_text` contains the span of our original text corresponding to the","# span that we predicted.","#","# However, `orig_text` may contain extra characters that we don't want in","# our prediction.","#","# For example, let's say:","# pred_text = steve smith","# orig_text = Steve Smith's","#","# We don't want to return `orig_text` because it contains the extra \"'s\".","#","# We don't want to return `pred_text` because it's already been normalized","# (the SQuAD eval script also does punctuation stripping\/lower casing but","# our tokenizer does additional normalization like stripping accent","# characters).","#","# What we really want to return is \"Steve Smith\".","#","# Therefore, we have to apply a semi-complicated alignment heuristic between","# `pred_text` and `orig_text` to get a character-to-character alignment. This","# can fail in certain cases in which case we just return `orig_text`.","def","_strip_spaces","(","text",")",":","ns_chars","=","[","]","ns_to_s_map","=","collections",".","OrderedDict","(",")","for","(","i",",","c",")","in","enumerate","(","text",")",":","if","c","==","\" \"",":","continue","ns_to_s_map","[","len","(","ns_chars",")","]","=","i","ns_chars",".","append","(","c",")","ns_text","=","\"\"",".","join","(","ns_chars",")","return","(","ns_text",",","ns_to_s_map",")","# We first tokenize `orig_text`, strip whitespace from the result","# and `pred_text`, and check if they are the same length. 
If they are","# NOT the same length, the heuristic has failed. If they are the same","# length, we assume the characters are one-to-one aligned.","tokenizer","=","tokenization",".","BasicTokenizer","(","do_lower_case","=","do_lower_case",")","tok_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_text",")",")","start_position","=","tok_text",".","find","(","pred_text",")","if","start_position","==","-","1",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Unable to find text: '%s' in '%s'\"","%","(","pred_text",",","orig_text",")",")","return","orig_text","end_position","=","start_position","+","len","(","pred_text",")","-","1","(","orig_ns_text",",","orig_ns_to_s_map",")","=","_strip_spaces","(","orig_text",")","(","tok_ns_text",",","tok_ns_to_s_map",")","=","_strip_spaces","(","tok_text",")","if","len","(","orig_ns_text",")","!=","len","(","tok_ns_text",")",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Length not equal after stripping spaces: '%s' vs '%s'\"",",","orig_ns_text",",","tok_ns_text",")","return","orig_text","# We then project the characters in `pred_text` back to `orig_text` using","# the character-to-character alignment.","tok_s_to_ns_map","=","{","}","for","(","i",",","tok_index",")","in","six",".","iteritems","(","tok_ns_to_s_map",")",":","tok_s_to_ns_map","[","tok_index","]","=","i","orig_start_position","=","None","if","start_position","in","tok_s_to_ns_map",":","ns_start_position","=","tok_s_to_ns_map","[","start_position","]","if","ns_start_position","in","orig_ns_to_s_map",":","orig_start_position","=","orig_ns_to_s_map","[","ns_start_position","]","if","orig_start_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map start position\"",")","return","orig_text","orig_end_position","=","None","if","end_position","in","tok_s_to_ns_map",":","ns_end_position","=","tok_s_to_ns_map","[","end_position","]","if","ns_end_position","in","orig_ns_to_s_map",":","orig_end_position","=","orig_ns_to_s_map","[","ns_end_position","]","if","orig_end_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map end position\"",")","return","orig_text","output_text","=","orig_text","[","orig_start_position",":","(","orig_end_position","+","1",")","]","return","output_text"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L927-L1020"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return 
best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L1023-L1032"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L1035-L1055"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"validate_flags_or_throw","parameters":"(bert_config)","argument_list":"","return_statement":"","docstring":"Validate the input FLAGS or throw an exception.","docstring_summary":"Validate the input FLAGS or throw an exception.","docstring_tokens":["Validate","the","input","FLAGS","or","throw","an","exception","."],"function":"def validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT 
model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))","function_tokens":["def","validate_flags_or_throw","(","bert_config",")",":","tokenization",".","validate_case_matches_checkpoint","(","FLAGS",".","do_lower_case",",","FLAGS",".","init_checkpoint",")","if","not","FLAGS",".","do_train","and","not","FLAGS",".","do_predict",":","raise","ValueError","(","\"At least one of `do_train` or `do_predict` must be True.\"",")","if","FLAGS",".","do_train",":","if","not","FLAGS",".","train_file",":","raise","ValueError","(","\"If `do_train` is True, then `train_file` must be specified.\"",")","if","FLAGS",".","do_predict",":","if","not","FLAGS",".","predict_file",":","raise","ValueError","(","\"If `do_predict` is True, then `predict_file` must be specified.\"",")","if","FLAGS",".","max_seq_length",">","bert_config",".","max_position_embeddings",":","raise","ValueError","(","\"Cannot use sequence length %d because the BERT model \"","\"was only trained up to sequence length %d\"","%","(","FLAGS",".","max_seq_length",",","bert_config",".","max_position_embeddings",")",")","if","FLAGS",".","max_seq_length","<=","FLAGS",".","max_query_length","+","3",":","raise","ValueError","(","\"The max_seq_length (%d) must be greater than max_query_length \"","\"(%d) + 3\"","%","(","FLAGS",".","max_seq_length",",","FLAGS",".","max_query_length",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L1097-L1123"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_squad.py","language":"python","identifier":"FeatureWriter.process_feature","parameters":"(self, feature)","argument_list":"","return_statement":"","docstring":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_summary":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_tokens":["Write","a","InputFeature","to","the","TFRecordWriter","as","a","tf",".","train",".","Example","."],"function":"def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n 
self._writer.write(tf_example.SerializeToString())","function_tokens":["def","process_feature","(","self",",","feature",")",":","self",".","num_features","+=","1","def","create_int_feature","(","values",")",":","feature","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","feature","features","=","collections",".","OrderedDict","(",")","features","[","\"unique_ids\"","]","=","create_int_feature","(","[","feature",".","unique_id","]",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","if","self",".","is_training",":","features","[","\"start_positions\"","]","=","create_int_feature","(","[","feature",".","start_position","]",")","features","[","\"end_positions\"","]","=","create_int_feature","(","[","feature",".","end_position","]",")","impossible","=","0","if","feature",".","is_impossible",":","impossible","=","1","features","[","\"is_impossible\"","]","=","create_int_feature","(","[","impossible","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","self",".","_writer",".","write","(","tf_example",".","SerializeToString","(",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_squad.py#L1067-L1091"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"validate_case_matches_checkpoint","parameters":"(do_lower_case, init_checkpoint)","argument_list":"","return_statement":"","docstring":"Checks whether the casing config is consistent with the checkpoint name.","docstring_summary":"Checks whether the casing config is consistent with the checkpoint name.","docstring_tokens":["Checks","whether","the","casing","config","is","consistent","with","the","checkpoint","name","."],"function":"def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. 
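FeatureWriter.process_feature above serializes each feature set as a tf.train.Example. A minimal sketch of that serialization with toy ids, assuming the TF 1.x API the repo uses:

import collections
import tensorflow as tf

def create_int_feature(values):
    # Same wrapper as in process_feature: a list of ints -> int64 Feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([1000001])        # toy values
features["input_ids"] = create_int_feature([101, 2769, 102])
features["input_mask"] = create_int_feature([1, 1, 1])
features["segment_ids"] = create_int_feature([0, 0, 0])

tf_example = tf.train.Example(features=tf.train.Features(feature=features))
serialized = tf_example.SerializeToString()  # bytes a TFRecordWriter would write
print(len(serialized))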
The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-trained. If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))","function_tokens":["def","validate_case_matches_checkpoint","(","do_lower_case",",","init_checkpoint",")",":","# The casing has to be passed in by the user and there is no explicit check","# as to whether it matches the checkpoint. The casing information probably","# should have been stored in the bert_config.json file, but it's not, so","# we have to heuristically detect it to validate.","if","not","init_checkpoint",":","return","m","=","re",".","match","(","\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\"",",","init_checkpoint",")","if","m","is","None",":","return","model_name","=","m",".","group","(","1",")","lower_models","=","[","\"uncased_L-24_H-1024_A-16\"",",","\"uncased_L-12_H-768_A-12\"",",","\"multilingual_L-12_H-768_A-12\"",",","\"chinese_L-12_H-768_A-12\"","]","cased_models","=","[","\"cased_L-12_H-768_A-12\"",",","\"cased_L-24_H-1024_A-16\"",",","\"multi_cased_L-12_H-768_A-12\"","]","is_bad_config","=","False","if","model_name","in","lower_models","and","not","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"False\"","case_name","=","\"lowercased\"","opposite_flag","=","\"True\"","if","model_name","in","cased_models","and","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"True\"","case_name","=","\"cased\"","opposite_flag","=","\"False\"","if","is_bad_config",":","raise","ValueError","(","\"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"","\"However, `%s` seems to be a %s model, so you \"","\"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"","\"how the model was pre-trained. 
If this error is wrong, please \"","\"just comment out this check.\"","%","(","actual_flag",",","init_checkpoint",",","model_name",",","case_name",",","opposite_flag",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L28-L75"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L78-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n 
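validate_case_matches_checkpoint above infers the pretrained model's casing from the checkpoint path with a regex. A quick demo of just that step (the path is hypothetical):

import re

init_checkpoint = "checkpoints/uncased_L-12_H-768_A-12/bert_model.ckpt"  # made up
m = re.match(r"^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is not None:
    model_name = m.group(1)
    print(model_name)  # uncased_L-12_H-768_A-12 -> expects --do_lower_case=True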
return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L98-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","index","=","0","with","tf",".","gfile",".","GFile","(","vocab_file",",","\"r\"",")","as","reader",":","while","True",":","token","=","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","token",":","break","token","=","token",".","strip","(",")","vocab","[","token","]","=","index","index","+=","1","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L121-L133"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"convert_by_vocab","parameters":"(vocab, items)","argument_list":"","return_statement":"return output","docstring":"Converts a sequence of [tokens|ids] using the vocab.","docstring_summary":"Converts a sequence of [tokens|ids] using the vocab.","docstring_tokens":["Converts","a","sequence","of","[","tokens|ids","]","using","the","vocab","."],"function":"def convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n output.append(vocab[item])\n return 
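load_vocab and convert_by_vocab above implement the token <-> id lookup. A self-contained round trip; load_vocab_from_lines is a hypothetical stand-in that reads an in-memory list instead of tf.gfile, so the sketch runs without TensorFlow:

import collections

def load_vocab_from_lines(lines):
    # Same logic as load_vocab above, minus the tf.gfile file handling.
    vocab = collections.OrderedDict()
    for index, token in enumerate(lines):
        vocab[token.strip()] = index
    return vocab

def convert_by_vocab(vocab, items):
    return [vocab[item] for item in items]

vocab = load_vocab_from_lines(["[PAD]\n", "[UNK]\n", "un\n", "##aff\n", "##able\n"])
ids = convert_by_vocab(vocab, ["un", "##aff", "##able"])
print(ids)                               # [2, 3, 4]
inv_vocab = {v: k for k, v in vocab.items()}
print(convert_by_vocab(inv_vocab, ids))  # ['un', '##aff', '##able']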
output","function_tokens":["def","convert_by_vocab","(","vocab",",","items",")",":","output","=","[","]","for","item","in","items",":","output",".","append","(","vocab","[","item","]",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L136-L141"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","piece","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L152-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically contorl characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L362-L371"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n \"\"\"Checks whether `chars` is a control 
character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat","in","(","\"Cc\"",",","\"Cf\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L374-L383"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L386-L399"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = 
do_lower_case","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",")",":","self",".","do_lower_case","=","do_lower_case"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L188-L194"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text.","docstring_summary":"Tokenizes a piece of text.","docstring_tokens":["Tokenizes","a","piece","of","text","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. 
This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L196-L218"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L220-L229"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text)","argument_list":"","return_statement":"return [\"\".join(x) for x in output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in 
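BasicTokenizer._run_strip_accents above drops combining marks after NFD decomposition. A standalone version with sample input (module-level rather than a method, for brevity):

import unicodedata

def run_strip_accents(text):
    # NFD splits accented characters into base char + combining mark ("Mn");
    # dropping the marks leaves the unaccented base characters.
    text = unicodedata.normalize("NFD", text)
    return "".join(c for c in text if unicodedata.category(c) != "Mn")

print(run_strip_accents("héllo wörld"))  # hello world
print(run_strip_accents("naïve café"))   # naive cafe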
output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",")",":","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L231-L249"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L251-L262"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L264-L284"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L286-L297"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tokenization.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return 
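_tokenize_chinese_chars and _is_chinese_char above pad every CJK ideograph with spaces so each becomes its own token. A condensed module-level sketch covering only the two main CJK ranges (the record also checks extensions B-F and the compatibility blocks):

def is_chinese_char(cp):
    # CJK Unified Ideographs plus Extension A only, for brevity.
    return (0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF)

def tokenize_chinese_chars(text):
    out = []
    for ch in text:
        out.append(" " + ch + " " if is_chinese_char(ord(ch)) else ch)
    return "".join(out)

print(tokenize_chinese_chars("BERT模型很好").split())
# ['BERT', '模', '型', '很', '好'] -- extra spaces collapse on whitespace split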
output_tokens","docstring":"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tokenization.py#L308-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tf_metrics.py","language":"python","identifier":"precision","parameters":"(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro')","argument_list":"","return_statement":"return (pr, op)","docstring":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of 
tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","precision","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def precision(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro'):\n \"\"\"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, 
optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n pr, _, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n op, _, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (pr, op)","function_tokens":["def","precision","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","pr",",","_",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","op",",","_",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","pr",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tf_metrics.py#L15-L50"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tf_metrics.py","language":"python","identifier":"recall","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro')","argument_list":"","return_statement":"return (re, op)","docstring":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","recall","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def recall(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro'):\n \"\"\"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, re, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n _, op, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (re, op)","function_tokens":["def","recall","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","re",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","_",",","op",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","re",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tf_metrics.py#L53-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tf_metrics.py","language":"python","identifier":"fbeta","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1)","argument_list":"","return_statement":"return (fbeta, op)","docstring":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","fbeta","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1):\n \"\"\"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, _, fbeta = metrics_from_confusion_matrix(\n cm, pos_indices, average=average, beta=beta)\n _, _, op = metrics_from_confusion_matrix(\n op, pos_indices, average=average, beta=beta)\n return (fbeta, op)","function_tokens":["def","fbeta","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",",","beta","=","1",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","_",",","fbeta","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",",","beta","=","beta",")","_",",","_",",","op","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",",","beta","=","beta",")","return","(","fbeta",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tf_metrics.py#L97-L134"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tf_metrics.py","language":"python","identifier":"safe_div","parameters":"(numerator, denominator)","argument_list":"","return_statement":"return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","docstring":"Safe division, return 0 if denominator is 0","docstring_summary":"Safe division, return 0 if denominator is 0","docstring_tokens":["Safe","division","return","0","if","denominator","is","0"],"function":"def safe_div(numerator, denominator):\n \"\"\"Safe division, return 0 if denominator is 0\"\"\"\n numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)\n zeros = tf.zeros_like(numerator, dtype=numerator.dtype)\n denominator_is_zero = tf.equal(denominator, zeros)\n return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","function_tokens":["def","safe_div","(","numerator",",","denominator",")",":","numerator",",","denominator","=","tf",".","to_float","(","numerator",")",",","tf",".","to_float","(","denominator",")","zeros","=","tf",".","zeros_like","(","numerator",",","dtype","=","numerator",".","dtype",")","denominator_is_zero","=","tf",".","equal","(","denominator",",","zeros",")","return","tf",".","where","(","denominator_is_zero",",","zeros",",","numerator","\/","denominator",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tf_metrics.py#L137-L142"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tf_metrics.py","language":"python","identifier":"pr_re_fbeta","parameters":"(cm, pos_indices, beta=1)","argument_list":"","return_statement":"return pr, re, fbeta","docstring":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_summary":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_tokens":["Uses","a","confusion","matrix","to","compute","precision","recall","and","fbeta"],"function":"def pr_re_fbeta(cm, pos_indices, 
beta=1):\n \"\"\"Uses a confusion matrix to compute precision, recall and fbeta\"\"\"\n num_classes = cm.shape[0]\n neg_indices = [i for i in range(num_classes) if i not in pos_indices]\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, neg_indices] = 0\n diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask))\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[:, neg_indices] = 0\n tot_pred = tf.reduce_sum(cm * cm_mask)\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, :] = 0\n tot_gold = tf.reduce_sum(cm * cm_mask)\n\n pr = safe_div(diag_sum, tot_pred)\n re = safe_div(diag_sum, tot_gold)\n fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re)\n\n return pr, re, fbeta","function_tokens":["def","pr_re_fbeta","(","cm",",","pos_indices",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","neg_indices","=","[","i","for","i","in","range","(","num_classes",")","if","i","not","in","pos_indices","]","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",","neg_indices","]","=","0","diag_sum","=","tf",".","reduce_sum","(","tf",".","diag_part","(","cm","*","cm_mask",")",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[",":",",","neg_indices","]","=","0","tot_pred","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",",":","]","=","0","tot_gold","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","pr","=","safe_div","(","diag_sum",",","tot_pred",")","re","=","safe_div","(","diag_sum",",","tot_gold",")","fbeta","=","safe_div","(","(","1.","+","beta","**","2",")","*","pr","*","re",",","beta","**","2","*","pr","+","re",")","return","pr",",","re",",","fbeta"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tf_metrics.py#L145-L165"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/tf_metrics.py","language":"python","identifier":"metrics_from_confusion_matrix","parameters":"(cm, pos_indices=None, average='micro',\n beta=1)","argument_list":"","return_statement":"","docstring":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'","docstring_summary":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 
'weighted'","docstring_tokens":["Precision","Recall","and","F1","from","the","confusion","matrix","Parameters","----------","cm",":","tf",".","Tensor","of","type","tf",".","int32","of","shape","(","num_classes","num_classes",")","The","streaming","confusion","matrix",".","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","average",":","str","optional","micro","macro","or","weighted"],"function":"def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro',\n beta=1):\n \"\"\"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'\n \"\"\"\n num_classes = cm.shape[0]\n if pos_indices is None:\n pos_indices = [i for i in range(num_classes)]\n\n if average == 'micro':\n return pr_re_fbeta(cm, pos_indices, beta)\n elif average in {'macro', 'weighted'}:\n precisions, recalls, fbetas, n_golds = [], [], [], []\n for idx in pos_indices:\n pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)\n precisions.append(pr)\n recalls.append(re)\n fbetas.append(fbeta)\n cm_mask = np.zeros([num_classes, num_classes])\n cm_mask[idx, :] = 1\n n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))\n\n if average == 'macro':\n pr = tf.reduce_mean(precisions)\n re = tf.reduce_mean(recalls)\n fbeta = tf.reduce_mean(fbetas)\n return pr, re, fbeta\n if average == 'weighted':\n n_gold = tf.reduce_sum(n_golds)\n pr_sum = sum(p * n for p, n in zip(precisions, n_golds))\n pr = safe_div(pr_sum, n_gold)\n re_sum = sum(r * n for r, n in zip(recalls, n_golds))\n re = safe_div(re_sum, n_gold)\n fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))\n fbeta = safe_div(fbeta_sum, n_gold)\n return pr, re, fbeta\n\n else:\n raise 
NotImplementedError()","function_tokens":["def","metrics_from_confusion_matrix","(","cm",",","pos_indices","=","None",",","average","=","'micro'",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","if","pos_indices","is","None",":","pos_indices","=","[","i","for","i","in","range","(","num_classes",")","]","if","average","==","'micro'",":","return","pr_re_fbeta","(","cm",",","pos_indices",",","beta",")","elif","average","in","{","'macro'",",","'weighted'","}",":","precisions",",","recalls",",","fbetas",",","n_golds","=","[","]",",","[","]",",","[","]",",","[","]","for","idx","in","pos_indices",":","pr",",","re",",","fbeta","=","pr_re_fbeta","(","cm",",","[","idx","]",",","beta",")","precisions",".","append","(","pr",")","recalls",".","append","(","re",")","fbetas",".","append","(","fbeta",")","cm_mask","=","np",".","zeros","(","[","num_classes",",","num_classes","]",")","cm_mask","[","idx",",",":","]","=","1","n_golds",".","append","(","tf",".","to_float","(","tf",".","reduce_sum","(","cm","*","cm_mask",")",")",")","if","average","==","'macro'",":","pr","=","tf",".","reduce_mean","(","precisions",")","re","=","tf",".","reduce_mean","(","recalls",")","fbeta","=","tf",".","reduce_mean","(","fbetas",")","return","pr",",","re",",","fbeta","if","average","==","'weighted'",":","n_gold","=","tf",".","reduce_sum","(","n_golds",")","pr_sum","=","sum","(","p","*","n","for","p",",","n","in","zip","(","precisions",",","n_golds",")",")","pr","=","safe_div","(","pr_sum",",","n_gold",")","re_sum","=","sum","(","r","*","n","for","r",",","n","in","zip","(","recalls",",","n_golds",")",")","re","=","safe_div","(","re_sum",",","n_gold",")","fbeta_sum","=","sum","(","f","*","n","for","f",",","n","in","zip","(","fbetas",",","n_golds",")",")","fbeta","=","safe_div","(","fbeta_sum",",","n_gold",")","return","pr",",","re",",","fbeta","else",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/tf_metrics.py#L168-L215"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * cdf","docstring":"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.","docstring_summary":"Gaussian Error Linear Unit.","docstring_tokens":["Gaussian","Error","Linear","Unit","."],"function":"def gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 \/ np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","tf",".","tanh","(","(","np",".","sqrt","(","2","\/","np",".","pi",")","*","(","x","+","0.044715","*","tf",".","pow","(","x",",","3",")",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L264-L277"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"get_activation","parameters":"(activation_string)","argument_list":"","return_statement":"","docstring":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.","docstring_summary":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.","docstring_tokens":["Maps","a","string","to","a","Python","function","e",".","g",".","relu","=",">","tf",".","nn",".","relu","."],"function":"def get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)","function_tokens":["def","get_activation","(","activation_string",")",":","# We assume that anything that\"s not a string is already an activation","# function, so we just return it.","if","not","isinstance","(","activation_string",",","six",".","string_types",")",":","return","activation_string","if","not","activation_string",":","return","None","act","=","activation_string",".","lower","(",")","if","act","==","\"linear\"",":","return","None","elif","act","==","\"relu\"",":","return","tf",".","nn",".","relu","elif","act","==","\"gelu\"",":","return","gelu","elif","act","==","\"tanh\"",":","return","tf",".","tanh","else",":","raise","ValueError","(","\"Unsupported activation: %s\"","%","act",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L280-L314"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"get_assignment_map_from_checkpoint","parameters":"(tvars, init_checkpoint)","argument_list":"","return_statement":"return (assignment_map, initialized_variable_names)","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def get_assignment_map_from_checkpoint(tvars, 
init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)","function_tokens":["def","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","assignment_map","=","{","}","initialized_variable_names","=","{","}","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","if","name","not","in","name_to_variable",":","continue","assignment_map","[","name","]","=","name","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","return","(","assignment_map",",","initialized_variable_names",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L317-L341"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"dropout","parameters":"(input_tensor, dropout_prob)","argument_list":"","return_statement":"return output","docstring":"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.","docstring_summary":"Perform dropout.","docstring_tokens":["Perform","dropout","."],"function":"def dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\n return output","function_tokens":["def","dropout","(","input_tensor",",","dropout_prob",")",":","if","dropout_prob","is","None","or","dropout_prob","==","0.0",":","return","input_tensor","output","=","tf",".","nn",".","dropout","(","input_tensor",",","1.0","-","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L344-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"layer_norm","parameters":"(input_tensor, name=None)","argument_list":"","return_statement":"return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","docstring":"Run layer normalization on the last dimension of the tensor.","docstring_summary":"Run layer normalization on the last dimension of the tensor.","docstring_tokens":["Run","layer","normalization","on","the","last","dimension","of","the","tensor","."],"function":"def layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","function_tokens":["def","layer_norm","(","input_tensor",",","name","=","None",")",":","return","tf",".","contrib",".","layers",".","layer_norm","(","inputs","=","input_tensor",",","begin_norm_axis","=","-","1",",","begin_params_axis","=","-","1",",","scope","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L362-L365"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"layer_norm_and_dropout","parameters":"(input_tensor, dropout_prob, name=None)","argument_list":"","return_statement":"return output_tensor","docstring":"Runs layer normalization followed by dropout.","docstring_summary":"Runs layer normalization followed by dropout.","docstring_tokens":["Runs","layer","normalization","followed","by","dropout","."],"function":"def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor","function_tokens":["def","layer_norm_and_dropout","(","input_tensor",",","dropout_prob",",","name","=","None",")",":","output_tensor","=","layer_norm","(","input_tensor",",","name",")","output_tensor","=","dropout","(","output_tensor",",","dropout_prob",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L368-L372"} 
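`get_assignment_map_from_checkpoint` above matches graph variables to checkpoint entries by stripping the `:0`-style output suffix that TensorFlow appends to variable names before comparing them against `tf.train.list_variables`. A pure-Python sketch of just that normalization step; the helper name and the example variable name are assumptions for illustration.

```python
# Illustrative sketch of the regex step in get_assignment_map_from_checkpoint;
# helper name and sample variable name are assumptions.
import re

def strip_output_suffix(var_name):
    # TF variable names look like "scope/var:0"; checkpoint keys omit the ":0".
    m = re.match(r"^(.*):\d+$", var_name)
    return m.group(1) if m else var_name

print(strip_output_suffix("bert/embeddings/word_embeddings:0"))
# -> bert/embeddings/word_embeddings
```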
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"create_initializer","parameters":"(initializer_range=0.02)","argument_list":"","return_statement":"return tf.truncated_normal_initializer(stddev=initializer_range)","docstring":"Creates a `truncated_normal_initializer` with the given range.","docstring_summary":"Creates a `truncated_normal_initializer` with the given range.","docstring_tokens":["Creates","a","truncated_normal_initializer","with","the","given","range","."],"function":"def create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)","function_tokens":["def","create_initializer","(","initializer_range","=","0.02",")",":","return","tf",".","truncated_normal_initializer","(","stddev","=","initializer_range",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L375-L377"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"embedding_lookup","parameters":"(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False)","argument_list":"","return_statement":"return (output, embedding_table)","docstring":"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].","docstring_summary":"Looks up words embeddings for id tensor.","docstring_tokens":["Looks","up","words","embeddings","for","id","tensor","."],"function":"def embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. 
If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.get_variable(\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n output = tf.matmul(one_hot_input_ids, embedding_table)\n else:\n output = tf.gather(embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * embedding_size])\n return (output, embedding_table)","function_tokens":["def","embedding_lookup","(","input_ids",",","vocab_size",",","embedding_size","=","128",",","initializer_range","=","0.02",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","False",")",":","# This function assumes that the input is of shape [batch_size, seq_length,","# num_inputs].","#","# If the input is a 2D tensor of shape [batch_size, seq_length], we","# reshape to [batch_size, seq_length, 1].","if","input_ids",".","shape",".","ndims","==","2",":","input_ids","=","tf",".","expand_dims","(","input_ids",",","axis","=","[","-","1","]",")","embedding_table","=","tf",".","get_variable","(","name","=","word_embedding_name",",","shape","=","[","vocab_size",",","embedding_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","flat_input_ids","=","tf",".","reshape","(","input_ids",",","[","-","1","]",")","if","use_one_hot_embeddings",":","one_hot_input_ids","=","tf",".","one_hot","(","flat_input_ids",",","depth","=","vocab_size",")","output","=","tf",".","matmul","(","one_hot_input_ids",",","embedding_table",")","else",":","output","=","tf",".","gather","(","embedding_table",",","flat_input_ids",")","input_shape","=","get_shape_list","(","input_ids",")","output","=","tf",".","reshape","(","output",",","input_shape","[","0",":","-","1","]","+","[","input_shape","[","-","1","]","*","embedding_size","]",")","return","(","output",",","embedding_table",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L380-L425"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"embedding_postprocessor","parameters":"(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1)","argument_list":"","return_statement":"return output","docstring":"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. 
Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.","docstring_summary":"Performs various post-processing on a word embedding tensor.","docstring_tokens":["Performs","various","post","-","processing","on","a","word","embedding","tensor","."],"function":"def embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n\n output = layer_norm_and_dropout(output, dropout_prob)\n return output","function_tokens":["def","embedding_postprocessor","(","input_tensor",",","use_token_type","=","False",",","token_type_ids","=","None",",","token_type_vocab_size","=","16",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","0.02",",","max_position_embeddings","=","512",",","dropout_prob","=","0.1",")",":","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","width","=","input_shape","[","2","]","output","=","input_tensor","if","use_token_type",":","if","token_type_ids","is","None",":","raise","ValueError","(","\"`token_type_ids` must be specified if\"","\"`use_token_type` is True.\"",")","token_type_table","=","tf",".","get_variable","(","name","=","token_type_embedding_name",",","shape","=","[","token_type_vocab_size",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# This vocab will be small so we always do one-hot here, since it is always","# faster for a small vocabulary.","flat_token_type_ids","=","tf",".","reshape","(","token_type_ids",",","[","-","1","]",")","one_hot_ids","=","tf",".","one_hot","(","flat_token_type_ids",",","depth","=","token_type_vocab_size",")","token_type_embeddings","=","tf",".","matmul","(","one_hot_ids",",","token_type_table",")","token_type_embeddings","=","tf",".","reshape","(","token_type_embeddings",",","[","batch_size",",","seq_length",",","width","]",")","output","+=","token_type_embeddings","if","use_position_embeddings",":","assert_op","=","tf",".","assert_less_equal","(","seq_length",",","max_position_embeddings",")","with","tf",".","control_dependencies","(","[","assert_op","]",")",":","full_position_embeddings","=","tf",".","get_variable","(","name","=","position_embedding_name",",","shape","=","[","max_position_embeddings",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# Since the position embedding table is a learned variable, we create it","# using a (long) sequence length `max_position_embeddings`. The actual","# sequence length might be shorter than this, for faster training of","# tasks that do not have long sequences.","#","# So `full_position_embeddings` is effectively an embedding table","# for position [0, 1, 2, ..., max_position_embeddings-1], and the current","# sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just","# perform a slice.","position_embeddings","=","tf",".","slice","(","full_position_embeddings",",","[","0",",","0","]",",","[","seq_length",",","-","1","]",")","num_dims","=","len","(","output",".","shape",".","as_list","(",")",")","# Only the last two dimensions are relevant (`seq_length` and `width`), so","# we broadcast among the first dimensions, which is typically just","# the batch size.","position_broadcast_shape","=","[","]","for","_","in","range","(","num_dims","-","2",")",":","position_broadcast_shape",".","append","(","1",")","position_broadcast_shape",".","extend","(","[","seq_length",",","width","]",")","position_embeddings","=","tf",".","reshape","(","position_embeddings",",","position_broadcast_shape",")","output","+=","position_embeddings","output","=","layer_norm_and_dropout","(","output",",","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L428-L521"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"create_attention_mask_from_input_mask","parameters":"(from_tensor, to_mask)","argument_list":"","return_statement":"return mask","docstring":"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].","docstring_summary":"Create 3D attention mask from a 2D tensor mask.","docstring_tokens":["Create","3D","attention","mask","from","a","2D","tensor","mask","."],"function":"def create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask","function_tokens":["def","create_attention_mask_from_input_mask","(","from_tensor",",","to_mask",")",":","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_shape","=","get_shape_list","(","to_mask",",","expected_rank","=","2",")","to_seq_length","=","to_shape","[","1","]","to_mask","=","tf",".","cast","(","tf",".","reshape","(","to_mask",",","[","batch_size",",","1",",","to_seq_length","]",")",",","tf",".","float32",")","# We don't assume that `from_tensor` is a mask (although it could be). We","# don't actually care if we attend *from* padding tokens (only *to* padding)","# tokens so we create a tensor of all ones.","#","# `broadcast_ones` = [batch_size, from_seq_length, 1]","broadcast_ones","=","tf",".","ones","(","shape","=","[","batch_size",",","from_seq_length",",","1","]",",","dtype","=","tf",".","float32",")","# Here we broadcast along two dimensions to create the mask.","mask","=","broadcast_ones","*","to_mask","return","mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L524-L555"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"attention_layer","parameters":"(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None)","argument_list":"","return_statement":"return context_layer","docstring":"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.","docstring_summary":"Performs multi-headed attention from `from_tensor` to `to_tensor`.","docstring_tokens":["Performs","multi","-","headed","attention","from","from_tensor","to","to_tensor","."],"function":"def attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = 
transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 \/ math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer","function_tokens":["def","attention_layer","(","from_tensor",",","to_tensor",",","attention_mask","=","None",",","num_attention_heads","=","1",",","size_per_head","=","512",",","query_act","=","None",",","key_act","=","None",",","value_act","=","None",",","attention_probs_dropout_prob","=","0.0",",","initializer_range","=","0.02",",","do_return_2d_tensor","=","False",",","batch_size","=","None",",","from_seq_length","=","None",",","to_seq_length","=","None",")",":","def","transpose_for_scores","(","input_tensor",",","batch_size",",","num_attention_heads",",","seq_length",",","width",")",":","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","batch_size",",","seq_length",",","num_attention_heads",",","width","]",")","output_tensor","=","tf",".","transpose","(","output_tensor",",","[","0",",","2",",","1",",","3","]",")","return","output_tensor","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","to_shape","=","get_shape_list","(","to_tensor",",","expected_rank","=","[","2",",","3","]",")","if","len","(","from_shape",")","!=","len","(","to_shape",")",":","raise","ValueError","(","\"The rank of `from_tensor` must match the rank of 
`to_tensor`.\"",")","if","len","(","from_shape",")","==","3",":","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_seq_length","=","to_shape","[","1","]","elif","len","(","from_shape",")","==","2",":","if","(","batch_size","is","None","or","from_seq_length","is","None","or","to_seq_length","is","None",")",":","raise","ValueError","(","\"When passing in rank 2 tensors to attention_layer, the values \"","\"for `batch_size`, `from_seq_length`, and `to_seq_length` \"","\"must all be specified.\"",")","# Scalar dimensions referenced here:","# B = batch size (number of sequences)","# F = `from_tensor` sequence length","# T = `to_tensor` sequence length","# N = `num_attention_heads`","# H = `size_per_head`","from_tensor_2d","=","reshape_to_matrix","(","from_tensor",")","to_tensor_2d","=","reshape_to_matrix","(","to_tensor",")","# `query_layer` = [B*F, N*H]","query_layer","=","tf",".","layers",".","dense","(","from_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","query_act",",","name","=","\"query\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `key_layer` = [B*T, N*H]","key_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","key_act",",","name","=","\"key\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `value_layer` = [B*T, N*H]","value_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","value_act",",","name","=","\"value\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `query_layer` = [B, N, F, H]","query_layer","=","transpose_for_scores","(","query_layer",",","batch_size",",","num_attention_heads",",","from_seq_length",",","size_per_head",")","# `key_layer` = [B, N, T, H]","key_layer","=","transpose_for_scores","(","key_layer",",","batch_size",",","num_attention_heads",",","to_seq_length",",","size_per_head",")","# Take the dot product between \"query\" and \"key\" to get the raw","# attention scores.","# `attention_scores` = [B, N, F, T]","attention_scores","=","tf",".","matmul","(","query_layer",",","key_layer",",","transpose_b","=","True",")","attention_scores","=","tf",".","multiply","(","attention_scores",",","1.0","\/","math",".","sqrt","(","float","(","size_per_head",")",")",")","if","attention_mask","is","not","None",":","# `attention_mask` = [B, 1, F, T]","attention_mask","=","tf",".","expand_dims","(","attention_mask",",","axis","=","[","1","]",")","# Since attention_mask is 1.0 for positions we want to attend and 0.0 for","# masked positions, this operation will create a tensor which is 0.0 for","# positions we want to attend and -10000.0 for masked positions.","adder","=","(","1.0","-","tf",".","cast","(","attention_mask",",","tf",".","float32",")",")","*","-","10000.0","# Since we are adding it to the raw scores before the softmax, this is","# effectively the same as removing these entirely.","attention_scores","+=","adder","# Normalize the attention scores to probabilities.","# `attention_probs` = [B, N, F, T]","attention_probs","=","tf",".","nn",".","softmax","(","attention_scores",")","# This is actually dropping out entire tokens to attend to, which might","# seem a bit unusual, but is taken from the original Transformer paper.","attention_probs","=","dropout","(","attention_probs",",","attention_probs_dropout_prob",")","# `value_layer` = 
[B, T, N, H]","value_layer","=","tf",".","reshape","(","value_layer",",","[","batch_size",",","to_seq_length",",","num_attention_heads",",","size_per_head","]",")","# `value_layer` = [B, N, T, H]","value_layer","=","tf",".","transpose","(","value_layer",",","[","0",",","2",",","1",",","3","]",")","# `context_layer` = [B, N, F, H]","context_layer","=","tf",".","matmul","(","attention_probs",",","value_layer",")","# `context_layer` = [B, F, N, H]","context_layer","=","tf",".","transpose","(","context_layer",",","[","0",",","2",",","1",",","3","]",")","if","do_return_2d_tensor",":","# `context_layer` = [B*F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size","*","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","else",":","# `context_layer` = [B, F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size",",","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","return","context_layer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L558-L751"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"transformer_model","parameters":"(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False)","argument_list":"","return_statement":"","docstring":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. 
Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.","docstring_summary":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".","docstring_tokens":["Multi","-","headed","multi","-","layer","Transformer","from","Attention","is","All","You","Need","."],"function":"def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size \/ num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on\n # the GPU\/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output","function_tokens":["def","transformer_model","(","input_tensor",",","attention_mask","=","None",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","intermediate_act_fn","=","gelu",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","initializer_range","=","0.02",",","do_return_all_layers","=","False",")",":","if","hidden_size","%","num_attention_heads","!=","0",":","raise","ValueError","(","\"The hidden size (%d) is not a multiple of the number of attention \"","\"heads 
(%d)\"","%","(","hidden_size",",","num_attention_heads",")",")","attention_head_size","=","int","(","hidden_size","\/","num_attention_heads",")","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","input_width","=","input_shape","[","2","]","# The Transformer performs sum residuals on all layers so the input needs","# to be the same as the hidden size.","if","input_width","!=","hidden_size",":","raise","ValueError","(","\"The width of the input tensor (%d) != hidden size (%d)\"","%","(","input_width",",","hidden_size",")",")","# We keep the representation as a 2D tensor to avoid re-shaping it back and","# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on","# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to","# help the optimizer.","prev_output","=","reshape_to_matrix","(","input_tensor",")","all_layer_outputs","=","[","]","for","layer_idx","in","range","(","num_hidden_layers",")",":","with","tf",".","variable_scope","(","\"layer_%d\"","%","layer_idx",")",":","layer_input","=","prev_output","with","tf",".","variable_scope","(","\"attention\"",")",":","attention_heads","=","[","]","with","tf",".","variable_scope","(","\"self\"",")",":","attention_head","=","attention_layer","(","from_tensor","=","layer_input",",","to_tensor","=","layer_input",",","attention_mask","=","attention_mask",",","num_attention_heads","=","num_attention_heads",",","size_per_head","=","attention_head_size",",","attention_probs_dropout_prob","=","attention_probs_dropout_prob",",","initializer_range","=","initializer_range",",","do_return_2d_tensor","=","True",",","batch_size","=","batch_size",",","from_seq_length","=","seq_length",",","to_seq_length","=","seq_length",")","attention_heads",".","append","(","attention_head",")","attention_output","=","None","if","len","(","attention_heads",")","==","1",":","attention_output","=","attention_heads","[","0","]","else",":","# In the case where we have other sequences, we just concatenate","# them to the self-attention head before the projection.","attention_output","=","tf",".","concat","(","attention_heads",",","axis","=","-","1",")","# Run a linear projection of `hidden_size` then add a residual","# with `layer_input`.","with","tf",".","variable_scope","(","\"output\"",")",":","attention_output","=","tf",".","layers",".","dense","(","attention_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","attention_output","=","dropout","(","attention_output",",","hidden_dropout_prob",")","attention_output","=","layer_norm","(","attention_output","+","layer_input",")","# The activation is only applied to the \"intermediate\" hidden layer.","with","tf",".","variable_scope","(","\"intermediate\"",")",":","intermediate_output","=","tf",".","layers",".","dense","(","attention_output",",","intermediate_size",",","activation","=","intermediate_act_fn",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# Down-project back to `hidden_size` then add the 
residual.","with","tf",".","variable_scope","(","\"output\"",")",":","layer_output","=","tf",".","layers",".","dense","(","intermediate_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","layer_output","=","dropout","(","layer_output",",","hidden_dropout_prob",")","layer_output","=","layer_norm","(","layer_output","+","attention_output",")","prev_output","=","layer_output","all_layer_outputs",".","append","(","layer_output",")","if","do_return_all_layers",":","final_outputs","=","[","]","for","layer_output","in","all_layer_outputs",":","final_output","=","reshape_from_matrix","(","layer_output",",","input_shape",")","final_outputs",".","append","(","final_output",")","return","final_outputs","else",":","final_output","=","reshape_from_matrix","(","prev_output",",","input_shape",")","return","final_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L754-L892"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"get_shape_list","parameters":"(tensor, expected_rank=None, name=None)","argument_list":"","return_statement":"return shape","docstring":"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.","docstring_summary":"Returns a list of the shape of tensor, preferring static dimensions.","docstring_tokens":["Returns","a","list","of","the","shape","of","tensor","preferring","static","dimensions","."],"function":"def get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape","function_tokens":["def","get_shape_list","(","tensor",",","expected_rank","=","None",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","if","expected_rank","is","not","None",":","assert_rank","(","tensor",",","expected_rank",",","name",")","shape","=","tensor",".","shape",".","as_list","(",")","non_static_indexes","=","[","]","for","(","index",",","dim",")","in","enumerate","(","shape",")",":","if","dim","is","None",":","non_static_indexes",".","append","(","index",")","if","not","non_static_indexes",":","return","shape","dyn_shape","=","tf",".","shape","(","tensor",")","for","index","in","non_static_indexes",":","shape","[","index","]","=","dyn_shape","[","index","]","return","shape"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L895-L929"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"reshape_to_matrix","parameters":"(input_tensor)","argument_list":"","return_statement":"return output_tensor","docstring":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_summary":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_tokens":["Reshapes","a",">","=","rank","2","tensor","to","a","rank","2","tensor","(","i",".","e",".","a","matrix",")","."],"function":"def reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor","function_tokens":["def","reshape_to_matrix","(","input_tensor",")",":","ndims","=","input_tensor",".","shape",".","ndims","if","ndims","<","2",":","raise","ValueError","(","\"Input tensor must have at least rank 2. 
Shape = %s\"","%","(","input_tensor",".","shape",")",")","if","ndims","==","2",":","return","input_tensor","width","=","input_tensor",".","shape","[","-","1","]","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","-","1",",","width","]",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L932-L943"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"reshape_from_matrix","parameters":"(output_tensor, orig_shape_list)","argument_list":"","return_statement":"return tf.reshape(output_tensor, orig_dims + [width])","docstring":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_summary":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_tokens":["Reshapes","a","rank","2","tensor","back","to","its","original","rank",">","=","2","tensor","."],"function":"def reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])","function_tokens":["def","reshape_from_matrix","(","output_tensor",",","orig_shape_list",")",":","if","len","(","orig_shape_list",")","==","2",":","return","output_tensor","output_shape","=","get_shape_list","(","output_tensor",")","orig_dims","=","orig_shape_list","[","0",":","-","1","]","width","=","output_shape","[","-","1","]","return","tf",".","reshape","(","output_tensor",",","orig_dims","+","[","width","]",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L946-L956"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"assert_rank","parameters":"(tensor, expected_rank, name=None)","argument_list":"","return_statement":"","docstring":"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.","docstring_summary":"Raises an exception if the tensor rank is not of the expected rank.","docstring_tokens":["Raises","an","exception","if","the","tensor","rank","is","not","of","the","expected","rank","."],"function":"def assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n 
scope_name = tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))","function_tokens":["def","assert_rank","(","tensor",",","expected_rank",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","expected_rank_dict","=","{","}","if","isinstance","(","expected_rank",",","six",".","integer_types",")",":","expected_rank_dict","[","expected_rank","]","=","True","else",":","for","x","in","expected_rank",":","expected_rank_dict","[","x","]","=","True","actual_rank","=","tensor",".","shape",".","ndims","if","actual_rank","not","in","expected_rank_dict",":","scope_name","=","tf",".","get_variable_scope","(",")",".","name","raise","ValueError","(","\"For the tensor `%s` in scope `%s`, the actual rank \"","\"`%d` (shape = %s) is not equal to the expected rank `%s`\"","%","(","name",",","scope_name",",","actual_rank",",","str","(","tensor",".","shape",")",",","str","(","expected_rank",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L959-L986"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.__init__","parameters":"(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range","function_tokens":["def","__init__","(","self",",","vocab_size",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","16",",","initializer_range","=","0.02",")",":","self",".","vocab_size","=","vocab_size","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L34-L80"} 
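The BertConfig records above and below all lean on the same pattern: the config object is a plain attribute bag, so from_dict just copies keys into __dict__ and to_json_string is json.dumps over a deep copy. A minimal self-contained sketch of that round trip follows, assuming nothing beyond the standard library; TinyConfig and its field names are illustrative stand-ins, not the repo's actual API.

import copy
import json


class TinyConfig(object):
    """Hypothetical config in the style of BertConfig: a plain attribute bag."""

    def __init__(self, vocab_size, hidden_size=768, num_attention_heads=12):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads

    @classmethod
    def from_dict(cls, json_object):
        # Build an empty shell, then copy every key in, so keys the
        # constructor does not know about still survive the round trip.
        config = cls(vocab_size=None)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    def to_dict(self):
        # Deep copy so callers cannot mutate the config through the dict.
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"


config = TinyConfig.from_dict({"vocab_size": 21128, "hidden_size": 768})
restored = TinyConfig.from_dict(json.loads(config.to_json_string()))
assert restored.to_dict() == config.to_dict()

The shell-then-overwrite trick in from_dict is what lets a checkpoint ship a config.json containing extra keys without breaking older loading code.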
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size","=","None",")","for","(","key",",","value",")","in","six",".","iteritems","(","json_object",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L83-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","tf",".","gfile",".","GFile","(","json_file",",","\"r\"",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L91-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L97-L100"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L102-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"BertModel.__init__","parameters":"(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None)","argument_list":"","return_statement":"","docstring":"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.","docstring_summary":"Constructor for BertModel.","docstring_tokens":["Constructor","for","BertModel","."],"function":"def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (self.embedding_output, self.embedding_table) = embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n\n self.sequence_output = self.all_encoder_layers[-1]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.variable_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. 
We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = tf.layers.dense(\n first_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(config.initializer_range))","function_tokens":["def","__init__","(","self",",","config",",","is_training",",","input_ids",",","input_mask","=","None",",","token_type_ids","=","None",",","use_one_hot_embeddings","=","False",",","scope","=","None",")",":","config","=","copy",".","deepcopy","(","config",")","if","not","is_training",":","config",".","hidden_dropout_prob","=","0.0","config",".","attention_probs_dropout_prob","=","0.0","input_shape","=","get_shape_list","(","input_ids",",","expected_rank","=","2",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","if","input_mask","is","None",":","input_mask","=","tf",".","ones","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","if","token_type_ids","is","None",":","token_type_ids","=","tf",".","zeros","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","with","tf",".","variable_scope","(","scope",",","default_name","=","\"bert\"",")",":","with","tf",".","variable_scope","(","\"embeddings\"",")",":","# Perform embedding lookup on the word ids.","(","self",".","embedding_output",",","self",".","embedding_table",")","=","embedding_lookup","(","input_ids","=","input_ids",",","vocab_size","=","config",".","vocab_size",",","embedding_size","=","config",".","hidden_size",",","initializer_range","=","config",".","initializer_range",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# Add positional embeddings and token type embeddings, then layer","# normalize and perform dropout.","self",".","embedding_output","=","embedding_postprocessor","(","input_tensor","=","self",".","embedding_output",",","use_token_type","=","True",",","token_type_ids","=","token_type_ids",",","token_type_vocab_size","=","config",".","type_vocab_size",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","config",".","initializer_range",",","max_position_embeddings","=","config",".","max_position_embeddings",",","dropout_prob","=","config",".","hidden_dropout_prob",")","with","tf",".","variable_scope","(","\"encoder\"",")",":","# This converts a 2D mask of shape [batch_size, seq_length] to a 3D","# mask of shape [batch_size, seq_length, seq_length] which is used","# for the attention scores.","attention_mask","=","create_attention_mask_from_input_mask","(","input_ids",",","input_mask",")","# Run the stacked transformer.","# `sequence_output` shape = [batch_size, seq_length, 
hidden_size].","self",".","all_encoder_layers","=","transformer_model","(","input_tensor","=","self",".","embedding_output",",","attention_mask","=","attention_mask",",","hidden_size","=","config",".","hidden_size",",","num_hidden_layers","=","config",".","num_hidden_layers",",","num_attention_heads","=","config",".","num_attention_heads",",","intermediate_size","=","config",".","intermediate_size",",","intermediate_act_fn","=","get_activation","(","config",".","hidden_act",")",",","hidden_dropout_prob","=","config",".","hidden_dropout_prob",",","attention_probs_dropout_prob","=","config",".","attention_probs_dropout_prob",",","initializer_range","=","config",".","initializer_range",",","do_return_all_layers","=","True",")","self",".","sequence_output","=","self",".","all_encoder_layers","[","-","1","]","# The \"pooler\" converts the encoded sequence tensor of shape","# [batch_size, seq_length, hidden_size] to a tensor of shape","# [batch_size, hidden_size]. This is necessary for segment-level","# (or segment-pair-level) classification tasks where we need a fixed","# dimensional representation of the segment.","with","tf",".","variable_scope","(","\"pooler\"",")",":","# We \"pool\" the model by simply taking the hidden state corresponding","# to the first token. We assume that this has been pre-trained","first_token_tensor","=","tf",".","squeeze","(","self",".","sequence_output","[",":",",","0",":","1",",",":","]",",","axis","=","1",")","self",".","pooled_output","=","tf",".","layers",".","dense","(","first_token_tensor",",","config",".","hidden_size",",","activation","=","tf",".","tanh",",","kernel_initializer","=","create_initializer","(","config",".","initializer_range",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L131-L232"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"BertModel.get_sequence_output","parameters":"(self)","argument_list":"","return_statement":"return self.sequence_output","docstring":"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.","docstring_summary":"Gets final hidden layer of encoder.","docstring_tokens":["Gets","final","hidden","layer","of","encoder","."],"function":"def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output","function_tokens":["def","get_sequence_output","(","self",")",":","return","self",".","sequence_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L237-L244"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/modeling.py","language":"python","identifier":"BertModel.get_embedding_output","parameters":"(self)","argument_list":"","return_statement":"return self.embedding_output","docstring":"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with 
the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.","docstring_summary":"Gets output of the embedding lookup (i.e., input to the transformer).","docstring_tokens":["Gets","output","of","the","embedding","lookup","(","i",".","e",".","input","to","the","transformer",")","."],"function":"def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output","function_tokens":["def","get_embedding_output","(","self",")",":","return","self",".","embedding_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/modeling.py#L249-L258"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"convert_example_list_for_inews","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature_list","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_example_list_for_inews(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return [InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)]\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n must_len = len(tokens_a) + 3\n extra_len = max_seq_length - must_len\n feature_list = []\n if example.text_b and extra_len > 0:\n extra_num = int((len(tokens_b) - 1) \/ extra_len) + 1\n for num in range(extra_num):\n max_len = min((num + 1) * extra_len, len(tokens_b))\n tokens_b_sub = tokens_b[num * extra_len: max_len]\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b_sub, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n else:\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n return 
feature_list","function_tokens":["def","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","[","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","]","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","must_len","=","len","(","tokens_a",")","+","3","extra_len","=","max_seq_length","-","must_len","feature_list","=","[","]","if","example",".","text_b","and","extra_len",">","0",":","extra_num","=","int","(","(","len","(","tokens_b",")","-","1",")","\/","extra_len",")","+","1","for","num","in","range","(","extra_num",")",":","max_len","=","min","(","(","num","+","1",")","*","extra_len",",","len","(","tokens_b",")",")","tokens_b_sub","=","tokens_b","[","num","*","extra_len",":","max_len","]","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b_sub",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","else",":","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","return","feature_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L233-L268"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features_for_inews","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features_for_inews(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n num_example = 0\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature_list = convert_example_list_for_inews(ex_index, example, label_list,\n max_seq_length, tokenizer)\n num_example += len(feature_list)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n for feature in feature_list:\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = 
create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"feature num: %s\", num_example)\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features_for_inews","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","num_example","=","0","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","1000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature_list","=","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","num_example","+=","len","(","feature_list",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","for","feature","in","feature_list",":","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","tf",".","logging",".","info","(","\"feature num: %s\"",",","num_example",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L271-L301"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"convert_single_example","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # 
Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return 
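The packing convention spelled out in the comments reduces to a few lines: [CLS] + A + [SEP] with type 0, then B + [SEP] with type 1, then zero padding out to max_seq_length. A plain-Python illustration (the real code pads the converted ids rather than token strings; "[PAD]" here only keeps the example readable):

def pack_pair(tokens_a, tokens_b, max_seq_length):
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    if tokens_b:
        tokens += tokens_b + ["[SEP]"]
        segment_ids += [1] * (len(tokens_b) + 1)
    input_mask = [1] * len(tokens)        # 1 = real token, 0 = padding
    pad = max_seq_length - len(tokens)
    return (tokens + ["[PAD]"] * pad,
            segment_ids + [0] * pad,
            input_mask + [0] * pad)

tokens, segment_ids, input_mask = pack_pair(["is", "this", "jack"], ["no"], 10)
print(tokens)       # ['[CLS]', 'is', 'this', 'jack', '[SEP]', 'no', '[SEP]', '[PAD]', '[PAD]', '[PAD]']
print(segment_ids)  # [0, 0, 0, 0, 0, 1, 1, 0, 0, 0]
print(input_mask)   # [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]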
feature","function_tokens":["def","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","max_seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","max_seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","label_id","=","label_map","[","example",".","label","]","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","tf",".","logging",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label_id",")",")","feature","=","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",",","is_real_example","=","True",")","return","feature"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L304-L403"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n 
writer.close()","function_tokens":["def","file_based_convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L406-L433"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"file_based_input_fn_builder","parameters":"(input_file, seq_length, is_training,\n drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = 
tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","file_based_input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"label_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"is_real_example\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","}","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L436-L481"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
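The file-based input_fn is the stock TF 1.x TFRecord pipeline: parse fixed-length features, downcast int64 to int32 for the TPU, shuffle and repeat only when training, and fuse map and batch. A condensed sketch with just two of the five features:

import tensorflow as tf  # TF 1.x

def make_input_fn(input_file, seq_length, is_training, drop_remainder):
    name_to_features = {
        "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
        "label_ids": tf.FixedLenFeature([], tf.int64),
    }

    def _decode(record):
        example = tf.parse_single_example(record, name_to_features)
        # tf.Example only stores int64; TPUs want int32, so downcast.
        return {k: tf.to_int32(v) if v.dtype == tf.int64 else v
                for k, v in example.items()}

    def input_fn(params):
        d = tf.data.TFRecordDataset(input_file)
        if is_training:
            d = d.repeat().shuffle(buffer_size=100)
        return d.apply(tf.contrib.data.map_and_batch(
            _decode, batch_size=params["batch_size"],
            drop_remainder=drop_remainder))

    return input_fn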
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L484-L498"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, 
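The truncation heuristic is small enough to test in isolation: it always pops from whichever sequence is currently longer, so a short sequence keeps its tokens even when its partner is very long. Runnable as-is:

def truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Trim the longer sequence one token at a time until the pair fits."""
    while len(tokens_a) + len(tokens_b) > max_length:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()

a, b = list("abcdefgh"), list("xy")
truncate_seq_pair(a, b, 6)
print(a, b)  # ['a', 'b', 'c', 'd'] ['x', 'y'] -- only the long side shrank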
probabilities)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use model.get_sequence_output()","# instead.","output_layer","=","model",".","get_pooled_output","(",")","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L501-L543"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if 
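The classification head above is one affine layer on the pooled [CLS] vector followed by softmax cross-entropy against one-hot labels. The same arithmetic in NumPy, which makes the per-example loss explicit (an illustration, not the baseline's code):

import numpy as np

def classifier_loss(pooled_output, output_weights, output_bias, labels, num_labels):
    logits = pooled_output @ output_weights.T + output_bias  # [batch, num_labels]
    z = logits - logits.max(-1, keepdims=True)               # stabilized log-softmax
    log_probs = z - np.log(np.exp(z).sum(-1, keepdims=True))
    one_hot = np.eye(num_labels)[labels]
    per_example_loss = -(one_hot * log_probs).sum(-1)        # cross-entropy per row
    return per_example_loss.mean()

rng = np.random.default_rng(0)
pooled = rng.normal(size=(4, 8))          # stand-in for get_pooled_output()
w = rng.normal(scale=0.02, size=(3, 8))   # truncated-normal-style init
b = np.zeros(3)
print(classifier_loss(pooled, w, b, np.array([0, 2, 1, 0]), num_labels=3))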
\"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","num_labels",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_real_example","=","None","if","\"is_real_example\"","in","features",":","is_real_example","=","tf",".","cast","(","features","[","\"is_real_example\"","]",",","dtype","=","tf",".","float32",")","else",":","is_real_example","=","tf",".","ones","(","tf",".","shape","(","label_ids",")",",","dtype","=","tf",".","float32",")","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",",","is_real_example",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","label_ids",",","predictions","=","predictions",",","weights","=","is_real_example",")","loss","=","tf",".","metrics",".","mean","(","values","=","per_example_loss",",","weights","=","is_real_example",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits",",","is_real_example","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L546-L635"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",",","is_training",",","drop_remainder",")",":","all_input_ids","=","[","]","all_input_mask","=","[","]","all_segment_ids","=","[","]","all_label_ids","=","[","]","for","feature","in","features",":","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_segment_ids",".","append","(","feature",".","segment_ids",")","all_label_ids",".","append","(","feature",".","label_id",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"segment_ids\"",":","tf",".","constant","(","all_segment_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"label_ids\"",":","tf",".","constant","(","all_label_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","}",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L640-L689"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return features","docstring":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_summary":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","list","of","InputFeatures","."],"function":"def convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n 
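input_fn_builder instead holds every feature in memory and embeds it in the graph as constants via Dataset.from_tensor_slices, which is why the comment flags it as demo-only. The core pattern, trimmed to two fields:

import tensorflow as tf  # TF 1.x

def make_in_memory_input_fn(all_input_ids, all_label_ids, seq_length, batch_size):
    num_examples = len(all_input_ids)
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids": tf.constant(all_input_ids,
                                 shape=[num_examples, seq_length],
                                 dtype=tf.int32),
        "label_ids": tf.constant(all_label_ids,
                                 shape=[num_examples], dtype=tf.int32),
    })
    # Everything lives in the graph as constants, hence "does NOT scale".
    return d.batch(batch_size)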
if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","features",".","append","(","feature",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier.py#L694-L707"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/optimization.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","AdamWeightDecayOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. 
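The schedule assembled above is linear warmup into linear decay (polynomial_decay with power=1.0 and end_learning_rate=0.0): below num_warmup_steps the rate is global_step / num_warmup_steps * init_lr, and afterwards it falls linearly to zero at num_train_steps. The same schedule as a plain function of the step:

def learning_rate_at(step, init_lr, num_train_steps, num_warmup_steps):
    if step < num_warmup_steps:
        return init_lr * step / num_warmup_steps   # linear warmup
    frac = min(step, num_train_steps) / num_train_steps
    return init_lr * (1.0 - frac)                  # linear decay to zero

for step in (0, 50, 100, 550, 1000):
    print(step, learning_rate_at(step, 5e-5, 1000, 100))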
But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/optimization.py#L25-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/optimization.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, 
grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want to decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/optimization.py#L108-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/optimization.py#L159-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/optimization.py#L169-L174"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"write_instance_to_example_files","parameters":"(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, 
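The apply_gradients above is Adam without bias correction plus decoupled weight decay: the decay term is added to the Adam update (not to the gradient), so it never contaminates the m and v moments, and it is skipped for LayerNorm and bias parameters. One parameter step in NumPy:

import numpy as np

def adamw_step(param, grad, m, v, lr, weight_decay_rate=0.01,
               beta_1=0.9, beta_2=0.999, epsilon=1e-6, decay=True):
    m = beta_1 * m + (1.0 - beta_1) * grad       # first moment
    v = beta_2 * v + (1.0 - beta_2) * grad ** 2  # second moment
    update = m / (np.sqrt(v) + epsilon)          # note: no bias correction
    if decay:                                    # skipped for LayerNorm/bias
        update += weight_decay_rate * param      # decoupled weight decay
    return param - lr * update, m, v

p = np.ones(3)
g = np.array([0.1, -0.2, 0.3])
p, m, v = adamw_step(p, g, np.zeros(3), np.zeros(3), lr=1e-3)
print(p)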
output_files)","argument_list":"","return_statement":"","docstring":"Create TF example files from `TrainingInstance`s.","docstring_summary":"Create TF example files from `TrainingInstance`s.","docstring_tokens":["Create","TF","example","files","from","TrainingInstance","s","."],"function":"def write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", 
total_written)","function_tokens":["def","write_instance_to_example_files","(","instances",",","tokenizer",",","max_seq_length",",","max_predictions_per_seq",",","output_files",")",":","writers","=","[","]","for","output_file","in","output_files",":","writers",".","append","(","tf",".","python_io",".","TFRecordWriter","(","output_file",")",")","writer_index","=","0","total_written","=","0","for","(","inst_index",",","instance",")","in","enumerate","(","instances",")",":","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","tokens",")","input_mask","=","[","1","]","*","len","(","input_ids",")","segment_ids","=","list","(","instance",".","segment_ids",")","assert","len","(","input_ids",")","<=","max_seq_length","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","masked_lm_positions","=","list","(","instance",".","masked_lm_positions",")","masked_lm_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","masked_lm_labels",")","masked_lm_weights","=","[","1.0","]","*","len","(","masked_lm_ids",")","while","len","(","masked_lm_positions",")","<","max_predictions_per_seq",":","masked_lm_positions",".","append","(","0",")","masked_lm_ids",".","append","(","0",")","masked_lm_weights",".","append","(","0.0",")","next_sentence_label","=","1","if","instance",".","is_random_next","else","0","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","segment_ids",")","features","[","\"masked_lm_positions\"","]","=","create_int_feature","(","masked_lm_positions",")","features","[","\"masked_lm_ids\"","]","=","create_int_feature","(","masked_lm_ids",")","features","[","\"masked_lm_weights\"","]","=","create_float_feature","(","masked_lm_weights",")","features","[","\"next_sentence_labels\"","]","=","create_int_feature","(","[","next_sentence_label","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writers","[","writer_index","]",".","write","(","tf_example",".","SerializeToString","(",")",")","writer_index","=","(","writer_index","+","1",")","%","len","(","writers",")","total_written","+=","1","if","inst_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","instance",".","tokens","]",")",")","for","feature_name","in","features",".","keys","(",")",":","feature","=","features","[","feature_name","]","values","=","[","]","if","feature",".","int64_list",".","value",":","values","=","feature",".","int64_list",".","value","elif","feature",".","float_list",".","value",":","values","=","feature",".","float_list",".","value","tf",".","logging",".","info","(","\"%s: %s\"","%","(","feature_name",",","\" \"",".","join","(","[","str","(","x",")","for","x","in","values","]",")",")",")","for","writer","in","writers",":","writer",".","close","(",")","tf",".","logging",".","info","(","\"Wrote %d total 
instances\"",",","total_written",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py#L96-L166"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"create_training_instances","parameters":"(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng)","argument_list":"","return_statement":"return instances","docstring":"Create `TrainingInstance`s from raw text.","docstring_summary":"Create `TrainingInstance`s from raw text.","docstring_tokens":["Create","TrainingInstance","s","from","raw","text","."],"function":"def create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng))\n\n rng.shuffle(instances)\n return instances","function_tokens":["def","create_training_instances","(","input_files",",","tokenizer",",","max_seq_length",",","dupe_factor",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","rng",")",":","all_documents","=","[","[","]","]","# Input file format:","# (1) One sentence per line. These should ideally be actual sentences, not","# entire paragraphs or arbitrary spans of text. (Because we use the","# sentence boundaries for the \"next sentence prediction\" task).","# (2) Blank lines between documents. 
Document boundaries are needed so","# that the \"next sentence prediction\" task doesn't span between documents.","for","input_file","in","input_files",":","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","# Empty lines are used as document delimiters","if","not","line",":","all_documents",".","append","(","[","]",")","tokens","=","tokenizer",".","tokenize","(","line",")","if","tokens",":","all_documents","[","-","1","]",".","append","(","tokens",")","# Remove empty documents","all_documents","=","[","x","for","x","in","all_documents","if","x","]","rng",".","shuffle","(","all_documents",")","vocab_words","=","list","(","tokenizer",".","vocab",".","keys","(",")",")","instances","=","[","]","for","_","in","range","(","dupe_factor",")",":","for","document_index","in","range","(","len","(","all_documents",")",")",":","instances",".","extend","(","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",")","rng",".","shuffle","(","instances",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py#L179-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document","parameters":"(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. 
Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document):\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances","function_tokens":["def","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. 
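As the comments in create_training_instances describe, the input corpus is one sentence per line with blank lines delimiting documents. A small sketch of that parsing loop on an in-memory string, with whitespace splitting standing in for the real WordPiece tokenizer:

raw = ("First sentence of doc one.\n"
       "Second sentence of doc one.\n"
       "\n"
       "Only sentence of doc two.\n")

all_documents = [[]]
for line in raw.splitlines():
    line = line.strip()
    if not line:                 # blank line: start a new document
        all_documents.append([])
        continue
    tokens = line.split()        # stand-in for tokenizer.tokenize(line)
    if tokens:
        all_documents[-1].append(tokens)

all_documents = [d for d in all_documents if d]  # drop empty documents
assert len(all_documents) == 2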
However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","instances","=","[","]","current_chunk","=","[","]","current_length","=","0","i","=","0","while","i","<","len","(","document",")",":","segment","=","document","[","i","]","current_chunk",".","append","(","segment",")","current_length","+=","len","(","segment",")","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","if","current_chunk",":","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","tokens_b","=","[","]","# Random next","is_random_next","=","False","if","len","(","current_chunk",")","==","1","or","rng",".","random","(",")","<","0.5",":","is_random_next","=","True","target_b_length","=","target_seq_length","-","len","(","tokens_a",")","# This should rarely go for more than one iteration for large","# corpora. 
However, just to be careful, we try to make sure that","# the random document is not the same as the document","# we're processing.","for","_","in","range","(","10",")",":","random_document_index","=","rng",".","randint","(","0",",","len","(","all_documents",")","-","1",")","if","random_document_index","!=","document_index",":","break","random_document","=","all_documents","[","random_document_index","]","random_start","=","rng",".","randint","(","0",",","len","(","random_document",")","-","1",")","for","j","in","range","(","random_start",",","len","(","random_document",")",")",":","tokens_b",".","extend","(","random_document","[","j","]",")","if","len","(","tokens_b",")",">=","target_b_length",":","break","# We didn't actually use these segments so we \"put them back\" so","# they don't go to waste.","num_unused_segments","=","len","(","current_chunk",")","-","a_end","i","-=","num_unused_segments","# Actual next","else",":","is_random_next","=","False","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","current_length","=","0","i","+=","1","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py#L223-L335"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"create_masked_lm_predictions","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to 
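Two of the stochastic choices in create_instances_from_document are easy to isolate: with probability short_seq_prob the target length is shortened, and segment B is drawn from a random document half the time (always, when the current chunk holds a single segment). A sketch of those decisions with Python's random module and assumed toy parameters:

import random

rng = random.Random(12345)
max_num_tokens = 128 - 3            # room for [CLS], [SEP], [SEP]
short_seq_prob = 0.1

target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:   # occasionally train on short sequences
    target_seq_length = rng.randint(2, max_num_tokens)

current_chunk_len = 1               # pretend the chunk holds one segment
is_random_next = current_chunk_len == 1 or rng.random() < 0.5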
an original word. When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. When a word has been split into","# WordPieces, the first token does not have any marker and any subsequence","# tokens are prefixed with ##. 
So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","output_tokens","=","list","(","tokens",")","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","masked_token","=","tokens","[","index","]","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py#L342-L415"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_num_tokens, rng)","argument_list":"","return_statement":"","docstring":"Truncates a pair of sequences to a maximum sequence length.","docstring_summary":"Truncates a pair of sequences to a maximum sequence length.","docstring_tokens":["Truncates","a","pair","of","sequences","to","a","maximum","sequence","length","."],"function":"def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else 
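The masking rule in create_masked_lm_predictions replaces a chosen position with [MASK] 80% of the time, keeps the original token 10% of the time, and samples a random vocabulary word the remaining 10%. A standalone sketch of that single decision, using a toy vocabulary:

import random

rng = random.Random(0)
vocab_words = ["the", "cat", "sat", "mat"]   # toy vocabulary
original_token = "cat"

if rng.random() < 0.8:
    masked_token = "[MASK]"                  # 80%: mask
elif rng.random() < 0.5:
    masked_token = original_token            # 10%: keep original
else:                                        # 10%: random replacement
    masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]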
tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()","function_tokens":["def","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")",":","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_num_tokens",":","break","trunc_tokens","=","tokens_a","if","len","(","tokens_a",")",">","len","(","tokens_b",")","else","tokens_b","assert","len","(","trunc_tokens",")",">=","1","# We want to sometimes truncate from the front and sometimes from the","# back to add more randomness and avoid biases.","if","rng",".","random","(",")","<","0.5",":","del","trunc_tokens","[","0","]","else",":","trunc_tokens",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/create_pretraining_data.py#L418-L433"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_ner.py","language":"python","identifier":"InputExample.__init__","parameters":"(self, guid, text, label=None)","argument_list":"","return_statement":"","docstring":"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.","docstring_summary":"Constructs a InputExample.","docstring_tokens":["Constructs","a","InputExample","."],"function":"def __init__(self, guid, text, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. 
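truncate_seq_pair above always shrinks the longer of the two lists, removing from the front or the back at random to avoid a positional bias. The same loop in isolation, on single-character stand-ins for WordPiece tokens:

import random

rng = random.Random(42)
tokens_a = list("abcdefgh")          # stand-ins for WordPiece tokens
tokens_b = list("xyz")
max_num_tokens = 6

while len(tokens_a) + len(tokens_b) > max_num_tokens:
    trunc = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
    if rng.random() < 0.5:
        del trunc[0]                 # sometimes drop from the front
    else:
        trunc.pop()                  # sometimes drop from the back

assert len(tokens_a) + len(tokens_b) == max_num_tokens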
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.label = label","function_tokens":["def","__init__","(","self",",","guid",",","text",",","label","=","None",")",":","self",".","guid","=","guid","self",".","text","=","text","self",".","label","=","label"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_ner.py#L123-L135"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_ner.py#L152-L154"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_ner.py#L156-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_ner.py#L160-L162"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_ner.py","language":"python","identifier":"DataProcessor._read_data","parameters":"(cls, input_file)","argument_list":"","return_statement":"","docstring":"Reads a BIO data.","docstring_summary":"Reads a 
BIO data.","docstring_tokens":["Reads","a","BIO","data","."],"function":"def _read_data(cls, input_file):\n \"\"\"Reads a BIO data.\"\"\"\n with open(input_file) as f:\n lines = []\n words = []\n labels = []\n for line in f:\n contends = line.strip()\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n if contends.startswith(\"-DOCSTART-\"):\n words.append('')\n continue\n if len(contends) == 0 and words[-1] == '.':\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append([l, w])\n words = []\n labels = []\n continue\n if len(contends) == 0:\n continue\n words.append(word)\n labels.append(label)\n return lines","function_tokens":["def","_read_data","(","cls",",","input_file",")",":","with","open","(","input_file",")","as","f",":","lines","=","[","]","words","=","[","]","labels","=","[","]","for","line","in","f",":","contends","=","line",".","strip","(",")","word","=","line",".","strip","(",")",".","split","(","' '",")","[","0","]","label","=","line",".","strip","(",")",".","split","(","' '",")","[","-","1","]","if","contends",".","startswith","(","\"-DOCSTART-\"",")",":","words",".","append","(","''",")","continue","if","len","(","contends",")","==","0","and","words","[","-","1","]","==","'.'",":","l","=","' '",".","join","(","[","label","for","label","in","labels","if","len","(","label",")",">","0","]",")","w","=","' '",".","join","(","[","word","for","word","in","words","if","len","(","word",")",">","0","]",")","lines",".","append","(","[","l",",","w","]",")","words","=","[","]","labels","=","[","]","continue","if","len","(","contends",")","==","0",":","continue","words",".","append","(","word",")","labels",".","append","(","label",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_ner.py#L165-L189"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/extract_features.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(\n all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",")",":","all_unique_ids","=","[","]","all_input_ids","=","[","]","all_input_mask","=","[","]","all_input_type_ids","=","[","]","for","feature","in","features",":","all_unique_ids",".","append","(","feature",".","unique_id",")","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_input_type_ids",".","append","(","feature",".","input_type_ids",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"unique_ids\"",":","tf",".","constant","(","all_unique_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_type_ids\"",":","tf",".","constant","(","all_input_type_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","}",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","False",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/extract_features.py#L100-L145"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/extract_features.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = 
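input_fn_builder above materializes every feature with Dataset.from_tensor_slices, which its own comment flags as demo-only (TFRecords are the production path). The same pattern in a minimal tf.data sketch; from_tensor_slices and batch exist with these signatures in both TF1 and TF2:

import tensorflow as tf

# Toy stand-ins for the stacked feature lists the builder collects.
d = tf.data.Dataset.from_tensor_slices({
    "unique_ids": tf.constant([0, 1], dtype=tf.int32),
    "input_ids": tf.constant([[101, 102, 0], [101, 103, 102]], dtype=tf.int32),
})
d = d.batch(batch_size=2, drop_remainder=False)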
modeling.BertModel(\n config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map,\n initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\n \"unique_id\": unique_ids,\n }\n\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","layer_indexes",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","input_type_ids","=","features","[","\"input_type_ids\"","]","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","False",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","input_type_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","if","mode","!=","tf",".","estimator",".","ModeKeys",".","PREDICT",":","raise","ValueError","(","\"Only PREDICT modes are supported: %s\"","%","(","mode",")",")","tvars","=","tf",".","trainable_variables","(",")","scaffold_fn","=","None","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","all_layers","=","model",".","get_all_encoder_layers","(",")","predictions","=","{","\"unique_id\"",":","unique_ids",",","}","for","(","i",",","layer_index",")","in","enumerate","(","layer_indexes",")",":","predictions","[","\"layer_output_%d\"","%","i","]","=","all_layers","[","layer_index","]","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/extract_features.py#L148-L207"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/extract_features.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, seq_length, tokenizer)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(\n InputFeatures(\n unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","input_type_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","input_type_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","input_type_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","seq_length","assert","len","(","input_mask",")","==","seq_length","assert","len","(","input_type_ids",")","==","seq_length","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","example",".","unique_id",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"input_type_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_type_ids","]",")",")","features",".","append","(","InputFeatures","(","unique_id","=","example",".","unique_id",",","tokens","=","tokens",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","input_type_ids","=","input_type_ids",")",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/extract_features.py#L210-L299"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/extract_features.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. 
This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/extract_features.py#L302-L316"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/extract_features.py","language":"python","identifier":"read_examples","parameters":"(input_file)","argument_list":"","return_statement":"return examples","docstring":"Read a list of `InputExample`s from an input file.","docstring_summary":"Read a list of `InputExample`s from an input file.","docstring_tokens":["Read","a","list","of","InputExample","s","from","an","input","file","."],"function":"def read_examples(input_file):\n \"\"\"Read a list of `InputExample`s from an input file.\"\"\"\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(\n InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))\n unique_id += 1\n return examples","function_tokens":["def","read_examples","(","input_file",")",":","examples","=","[","]","unique_id","=","0","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","text_a","=","None","text_b","=","None","m","=","re",".","match","(","r\"^(.*) \\|\\|\\| (.*)$\"",",","line",")","if","m","is","None",":","text_a","=","line","else",":","text_a","=","m",".","group","(","1",")","text_b","=","m",".","group","(","2",")","examples",".","append","(","InputExample","(","unique_id","=","unique_id",",","text_a","=","text_a",",","text_b","=","text_b",")",")","unique_id","+=","1","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/extract_features.py#L319-L340"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"create_model","parameters":"(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle):\n \"\"\"Creates a classification model.\"\"\"\n tags = set()\n if is_training:\n tags.add(\"train\")\n bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True)\n 
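read_examples above splits each input line on " ||| " into text_a/text_b via a regex; lines without the separator become single-sentence examples. The match in isolation:

import re

for line in ["standalone sentence", "first half ||| second half"]:
    m = re.match(r"^(.*) \|\|\| (.*)$", line)
    if m is None:
        text_a, text_b = line, None
    else:
        text_a, text_b = m.group(1), m.group(2)
    print(text_a, "/", text_b)
# standalone sentence / None
# first half / second half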
bert_inputs = dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use\n # bert_outputs[\"sequence_output\"] instead.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)","function_tokens":["def","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","bert_hub_module_handle",")",":","tags","=","set","(",")","if","is_training",":","tags",".","add","(","\"train\"",")","bert_module","=","hub",".","Module","(","bert_hub_module_handle",",","tags","=","tags",",","trainable","=","True",")","bert_inputs","=","dict","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",")","bert_outputs","=","bert_module","(","inputs","=","bert_inputs",",","signature","=","\"tokens\"",",","as_dict","=","True",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use","# bert_outputs[\"sequence_output\"] instead.","output_layer","=","bert_outputs","[","\"pooled_output\"","]","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 
dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier_with_tfhub.py#L37-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"model_fn_builder","parameters":"(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,\n bert_hub_module_handle)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions={\"probabilities\": probabilities})\n else:\n raise ValueError(\n \"Only TRAIN, EVAL and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return 
model_fn","function_tokens":["def","model_fn_builder","(","num_labels",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","bert_hub_module_handle",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","bert_hub_module_handle",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","label_ids",",","predictions",")","loss","=","tf",".","metrics",".","mean","(","per_example_loss",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",")","else",":","raise","ValueError","(","\"Only TRAIN, EVAL and PREDICT modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier_with_tfhub.py#L87-L143"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"create_tokenizer_from_hub_module","parameters":"(bert_hub_module_handle)","argument_list":"","return_statement":"return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","docstring":"Get the vocab file and casing info from the Hub module.","docstring_summary":"Get the vocab file and casing info from the Hub module.","docstring_tokens":["Get","the","vocab","file","and","casing","info","from","the","Hub","module","."],"function":"def 
create_tokenizer_from_hub_module(bert_hub_module_handle):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_hub_module_handle)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","function_tokens":["def","create_tokenizer_from_hub_module","(","bert_hub_module_handle",")",":","with","tf",".","Graph","(",")",".","as_default","(",")",":","bert_module","=","hub",".","Module","(","bert_hub_module_handle",")","tokenization_info","=","bert_module","(","signature","=","\"tokenization_info\"",",","as_dict","=","True",")","with","tf",".","Session","(",")","as","sess",":","vocab_file",",","do_lower_case","=","sess",".","run","(","[","tokenization_info","[","\"vocab_file\"","]",",","tokenization_info","[","\"do_lower_case\"","]","]",")","return","tokenization",".","FullTokenizer","(","vocab_file","=","vocab_file",",","do_lower_case","=","do_lower_case",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_ext\/run_classifier_with_tfhub.py#L146-L155"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * cdf","docstring":"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.","docstring_summary":"Gaussian Error Linear Unit.","docstring_tokens":["Gaussian","Error","Linear","Unit","."],"function":"def gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 \/ np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","tf",".","tanh","(","(","np",".","sqrt","(","2","\/","np",".","pi",")","*","(","x","+","0.044715","*","tf",".","pow","(","x",",","3",")",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L9-L22"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"embedding_lookup","parameters":"(x, n_token, d_embed, initializer, use_tpu=True,\n scope='embedding', reuse=None, dtype=tf.float32)","argument_list":"","return_statement":"","docstring":"TPU and GPU embedding_lookup function.","docstring_summary":"TPU and GPU embedding_lookup function.","docstring_tokens":["TPU","and","GPU","embedding_lookup","function","."],"function":"def embedding_lookup(x, n_token, d_embed, initializer, use_tpu=True,\n scope='embedding', reuse=None, dtype=tf.float32):\n \"\"\"TPU and GPU embedding_lookup function.\"\"\"\n with tf.variable_scope(scope, reuse=reuse):\n lookup_table = 
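The `gelu` record above uses the tanh approximation from the paper it cites (https://arxiv.org/abs/1606.08415). The same formula in NumPy, runnable as-is:

```python
import numpy as np

def gelu(x):
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    cdf = 0.5 * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    return x * cdf

x = np.linspace(-3, 3, 7)
print(gelu(x))  # near 0 for large negative x, near x for large positive x
```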
tf.get_variable('lookup_table', [n_token, d_embed],\n dtype=dtype, initializer=initializer)\n if use_tpu:\n one_hot_idx = tf.one_hot(x, n_token, dtype=dtype)\n if one_hot_idx.shape.ndims == 2:\n return tf.einsum('in,nd->id', one_hot_idx, lookup_table), lookup_table\n else:\n return tf.einsum('ibn,nd->ibd', one_hot_idx, lookup_table), lookup_table\n else:\n return tf.nn.embedding_lookup(lookup_table, x), lookup_table","function_tokens":["def","embedding_lookup","(","x",",","n_token",",","d_embed",",","initializer",",","use_tpu","=","True",",","scope","=","'embedding'",",","reuse","=","None",",","dtype","=","tf",".","float32",")",":","with","tf",".","variable_scope","(","scope",",","reuse","=","reuse",")",":","lookup_table","=","tf",".","get_variable","(","'lookup_table'",",","[","n_token",",","d_embed","]",",","dtype","=","dtype",",","initializer","=","initializer",")","if","use_tpu",":","one_hot_idx","=","tf",".","one_hot","(","x",",","n_token",",","dtype","=","dtype",")","if","one_hot_idx",".","shape",".","ndims","==","2",":","return","tf",".","einsum","(","'in,nd->id'",",","one_hot_idx",",","lookup_table",")",",","lookup_table","else",":","return","tf",".","einsum","(","'ibn,nd->ibd'",",","one_hot_idx",",","lookup_table",")",",","lookup_table","else",":","return","tf",".","nn",".","embedding_lookup","(","lookup_table",",","x",")",",","lookup_table"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L25-L38"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"positionwise_ffn","parameters":"(inp, d_model, d_inner, dropout, kernel_initializer,\n activation_type='relu', scope='ff', is_training=True,\n reuse=None)","argument_list":"","return_statement":"return output","docstring":"Position-wise Feed-forward Network.","docstring_summary":"Position-wise Feed-forward Network.","docstring_tokens":["Position","-","wise","Feed","-","forward","Network","."],"function":"def positionwise_ffn(inp, d_model, d_inner, dropout, kernel_initializer,\n activation_type='relu', scope='ff', is_training=True,\n reuse=None):\n \"\"\"Position-wise Feed-forward Network.\"\"\"\n if activation_type == 'relu':\n activation = tf.nn.relu\n elif activation_type == 'gelu':\n activation = gelu\n else:\n raise ValueError('Unsupported activation type {}'.format(activation_type))\n\n output = inp\n with tf.variable_scope(scope, reuse=reuse):\n output = tf.layers.dense(output, d_inner, activation=activation,\n kernel_initializer=kernel_initializer,\n name='layer_1')\n output = tf.layers.dropout(output, dropout, training=is_training,\n name='drop_1')\n output = tf.layers.dense(output, d_model,\n kernel_initializer=kernel_initializer,\n name='layer_2')\n output = tf.layers.dropout(output, dropout, training=is_training,\n name='drop_2')\n output = tf.contrib.layers.layer_norm(output + inp, begin_norm_axis=-1,\n scope='LayerNorm')\n return output","function_tokens":["def","positionwise_ffn","(","inp",",","d_model",",","d_inner",",","dropout",",","kernel_initializer",",","activation_type","=","'relu'",",","scope","=","'ff'",",","is_training","=","True",",","reuse","=","None",")",":","if","activation_type","==","'relu'",":","activation","=","tf",".","nn",".","relu","elif","activation_type","==","'gelu'",":","activation","=","gelu","else",":","raise","ValueError","(","'Unsupported activation type 
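`embedding_lookup` takes a one-hot-matmul path on TPU and a gather path otherwise; the two are mathematically identical, which the following NumPy check demonstrates:

```python
import numpy as np

rng = np.random.default_rng(0)
n_token, d_embed, seq = 10, 4, 5
lookup_table = rng.normal(size=(n_token, d_embed))
x = rng.integers(0, n_token, size=seq)

one_hot_idx = np.eye(n_token)[x]                          # [seq, n_token]
tpu_style = np.einsum('in,nd->id', one_hot_idx, lookup_table)
gpu_style = lookup_table[x]                               # plain gather
assert np.allclose(tpu_style, gpu_style)
```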
{}'",".","format","(","activation_type",")",")","output","=","inp","with","tf",".","variable_scope","(","scope",",","reuse","=","reuse",")",":","output","=","tf",".","layers",".","dense","(","output",",","d_inner",",","activation","=","activation",",","kernel_initializer","=","kernel_initializer",",","name","=","'layer_1'",")","output","=","tf",".","layers",".","dropout","(","output",",","dropout",",","training","=","is_training",",","name","=","'drop_1'",")","output","=","tf",".","layers",".","dense","(","output",",","d_model",",","kernel_initializer","=","kernel_initializer",",","name","=","'layer_2'",")","output","=","tf",".","layers",".","dropout","(","output",",","dropout",",","training","=","is_training",",","name","=","'drop_2'",")","output","=","tf",".","contrib",".","layers",".","layer_norm","(","output","+","inp",",","begin_norm_axis","=","-","1",",","scope","=","'LayerNorm'",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L52-L77"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"head_projection","parameters":"(h, d_model, n_head, d_head, kernel_initializer, name)","argument_list":"","return_statement":"return head","docstring":"Project hidden states to a specific head with a 4D-shape.","docstring_summary":"Project hidden states to a specific head with a 4D-shape.","docstring_tokens":["Project","hidden","states","to","a","specific","head","with","a","4D","-","shape","."],"function":"def head_projection(h, d_model, n_head, d_head, kernel_initializer, name):\n \"\"\"Project hidden states to a specific head with a 4D-shape.\"\"\"\n proj_weight = tf.get_variable('{}\/kernel'.format(name),\n [d_model, n_head, d_head], dtype=h.dtype,\n initializer=kernel_initializer)\n head = tf.einsum('ibh,hnd->ibnd', h, proj_weight)\n\n return head","function_tokens":["def","head_projection","(","h",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","name",")",":","proj_weight","=","tf",".","get_variable","(","'{}\/kernel'",".","format","(","name",")",",","[","d_model",",","n_head",",","d_head","]",",","dtype","=","h",".","dtype",",","initializer","=","kernel_initializer",")","head","=","tf",".","einsum","(","'ibh,hnd->ibnd'",",","h",",","proj_weight",")","return","head"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L80-L87"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"post_attention","parameters":"(h, attn_vec, d_model, n_head, d_head, dropout, is_training,\n kernel_initializer, residual=True)","argument_list":"","return_statement":"return output","docstring":"Post-attention processing.","docstring_summary":"Post-attention processing.","docstring_tokens":["Post","-","attention","processing","."],"function":"def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training,\n kernel_initializer, residual=True):\n \"\"\"Post-attention processing.\"\"\"\n # post-attention projection (back to `d_model`)\n proj_o = tf.get_variable('o\/kernel', [d_model, n_head, d_head],\n dtype=h.dtype, initializer=kernel_initializer)\n attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, proj_o)\n\n attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)\n if 
residual:\n output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1,\n scope='LayerNorm')\n else:\n output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1,\n scope='LayerNorm')\n\n return output","function_tokens":["def","post_attention","(","h",",","attn_vec",",","d_model",",","n_head",",","d_head",",","dropout",",","is_training",",","kernel_initializer",",","residual","=","True",")",":","# post-attention projection (back to `d_model`)","proj_o","=","tf",".","get_variable","(","'o\/kernel'",",","[","d_model",",","n_head",",","d_head","]",",","dtype","=","h",".","dtype",",","initializer","=","kernel_initializer",")","attn_out","=","tf",".","einsum","(","'ibnd,hnd->ibh'",",","attn_vec",",","proj_o",")","attn_out","=","tf",".","layers",".","dropout","(","attn_out",",","dropout",",","training","=","is_training",")","if","residual",":","output","=","tf",".","contrib",".","layers",".","layer_norm","(","attn_out","+","h",",","begin_norm_axis","=","-","1",",","scope","=","'LayerNorm'",")","else",":","output","=","tf",".","contrib",".","layers",".","layer_norm","(","attn_out",",","begin_norm_axis","=","-","1",",","scope","=","'LayerNorm'",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"abs_attn_core","parameters":"(q_head, k_head, v_head, attn_mask, dropatt, is_training,\n scale)","argument_list":"","return_statement":"return attn_vec","docstring":"Core absolute positional attention operations.","docstring_summary":"Core absolute positional attention operations.","docstring_tokens":["Core","absolute","positional","attention","operations","."],"function":"def abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, is_training,\n scale):\n \"\"\"Core absolute positional attention operations.\"\"\"\n\n attn_score = tf.einsum('ibnd,jbnd->ijbn', q_head, k_head)\n attn_score *= scale\n if attn_mask is not None:\n attn_score = attn_score - 1e30 * attn_mask\n\n # attention probability\n attn_prob = tf.nn.softmax(attn_score, 1)\n attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)\n\n # attention output\n attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head)\n\n return attn_vec","function_tokens":["def","abs_attn_core","(","q_head",",","k_head",",","v_head",",","attn_mask",",","dropatt",",","is_training",",","scale",")",":","attn_score","=","tf",".","einsum","(","'ibnd,jbnd->ijbn'",",","q_head",",","k_head",")","attn_score","*=","scale","if","attn_mask","is","not","None",":","attn_score","=","attn_score","-","1e30","*","attn_mask","# attention probability","attn_prob","=","tf",".","nn",".","softmax","(","attn_score",",","1",")","attn_prob","=","tf",".","layers",".","dropout","(","attn_prob",",","dropatt",",","training","=","is_training",")","# attention output","attn_vec","=","tf",".","einsum","(","'ijbn,jbnd->ibnd'",",","attn_prob",",","v_head",")","return","attn_vec"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L109-L125"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"rel_attn_core","parameters":"(q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,\n r_w_bias, 
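`abs_attn_core` masks by subtracting `1e30 * attn_mask` from the scaled scores before the softmax over the key axis. The same sequence in NumPy (dropout omitted; axis letters follow the einsum strings above: i/j = query/key positions, b = batch, n = heads, d = head dim):

```python
import numpy as np

def softmax(x, axis):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

rng = np.random.default_rng(0)
qlen, klen, bsz, n_head, d_head = 3, 5, 2, 2, 4
q = rng.normal(size=(qlen, bsz, n_head, d_head))
k = rng.normal(size=(klen, bsz, n_head, d_head))
v = rng.normal(size=(klen, bsz, n_head, d_head))
attn_mask = np.zeros((qlen, klen, bsz, n_head))
attn_mask[:, -1] = 1.0                            # e.g. mask the last key

attn_score = np.einsum('ibnd,jbnd->ijbn', q, k) * (1 / d_head ** 0.5)
attn_score = attn_score - 1e30 * attn_mask        # push masked logits to -inf
attn_prob = softmax(attn_score, axis=1)           # normalize over key axis j
attn_vec = np.einsum('ijbn,jbnd->ibnd', attn_prob, v)
assert np.allclose(attn_prob[:, -1], 0.0)         # masked key gets no weight
```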
r_r_bias, r_s_bias, attn_mask, dropatt, is_training,\n scale)","argument_list":"","return_statement":"return attn_vec","docstring":"Core relative positional attention operations.","docstring_summary":"Core relative positional attention operations.","docstring_tokens":["Core","relative","positional","attention","operations","."],"function":"def rel_attn_core(q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,\n r_w_bias, r_r_bias, r_s_bias, attn_mask, dropatt, is_training,\n scale):\n \"\"\"Core relative positional attention operations.\"\"\"\n\n # content based attention score\n ac = tf.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h)\n\n # position based attention score\n bd = tf.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r)\n bd = rel_shift(bd, klen=tf.shape(ac)[1])\n\n # segment based attention score\n if seg_mat is None:\n ef = 0\n else:\n ef = tf.einsum('ibnd,snd->ibns', q_head + r_s_bias, seg_embed)\n ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)\n\n # merge attention scores and perform masking\n attn_score = (ac + bd + ef) * scale\n if attn_mask is not None:\n # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask\n attn_score = attn_score - 1e30 * attn_mask\n\n # attention probability\n attn_prob = tf.nn.softmax(attn_score, 1)\n attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)\n\n # attention output\n attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)\n\n return attn_vec","function_tokens":["def","rel_attn_core","(","q_head",",","k_head_h",",","v_head_h",",","k_head_r",",","seg_embed",",","seg_mat",",","r_w_bias",",","r_r_bias",",","r_s_bias",",","attn_mask",",","dropatt",",","is_training",",","scale",")",":","# content based attention score","ac","=","tf",".","einsum","(","'ibnd,jbnd->ijbn'",",","q_head","+","r_w_bias",",","k_head_h",")","# position based attention score","bd","=","tf",".","einsum","(","'ibnd,jbnd->ijbn'",",","q_head","+","r_r_bias",",","k_head_r",")","bd","=","rel_shift","(","bd",",","klen","=","tf",".","shape","(","ac",")","[","1","]",")","# segment based attention score","if","seg_mat","is","None",":","ef","=","0","else",":","ef","=","tf",".","einsum","(","'ibnd,snd->ibns'",",","q_head","+","r_s_bias",",","seg_embed",")","ef","=","tf",".","einsum","(","'ijbs,ibns->ijbn'",",","seg_mat",",","ef",")","# merge attention scores and perform masking","attn_score","=","(","ac","+","bd","+","ef",")","*","scale","if","attn_mask","is","not","None",":","# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask","attn_score","=","attn_score","-","1e30","*","attn_mask","# attention probability","attn_prob","=","tf",".","nn",".","softmax","(","attn_score",",","1",")","attn_prob","=","tf",".","layers",".","dropout","(","attn_prob",",","dropatt",",","training","=","is_training",")","# attention output","attn_vec","=","tf",".","einsum","(","'ijbn,jbnd->ibnd'",",","attn_prob",",","v_head_h",")","return","attn_vec"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L128-L160"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"rel_shift","parameters":"(x, klen=-1)","argument_list":"","return_statement":"return x","docstring":"perform relative shift to form the relative attention score.","docstring_summary":"perform relative shift to form the relative attention 
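`rel_attn_core` sums three einsum scores: content-content (`ac`), content-position (`bd`, which the real code passes through `rel_shift`), and content-segment (`ef`). A NumPy sketch of just the three terms, with masking, dropout, and the shift omitted (so `qlen == klen` here):

```python
import numpy as np

rng = np.random.default_rng(0)
qlen, klen, bsz, n_head, d_head = 3, 3, 1, 2, 4
q_head = rng.normal(size=(qlen, bsz, n_head, d_head))
k_head_h = rng.normal(size=(klen, bsz, n_head, d_head))
k_head_r = rng.normal(size=(klen, bsz, n_head, d_head))
seg_embed = rng.normal(size=(2, n_head, d_head))
seg_mat = np.eye(2)[rng.integers(0, 2, size=(qlen, klen, bsz))]
r_w_bias = rng.normal(size=(n_head, d_head))
r_r_bias = rng.normal(size=(n_head, d_head))
r_s_bias = rng.normal(size=(n_head, d_head))

ac = np.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h)  # content
bd = np.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r)  # position
ef = np.einsum('ibnd,snd->ibns', q_head + r_s_bias, seg_embed)  # segment
ef = np.einsum('ijbs,ibns->ijbn', seg_mat, ef)
attn_score = (ac + bd + ef) * (1 / d_head ** 0.5)
```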
score.","docstring_tokens":["perform","relative","shift","to","form","the","relative","attention","score","."],"function":"def rel_shift(x, klen=-1):\n \"\"\"perform relative shift to form the relative attention score.\"\"\"\n x_size = tf.shape(x)\n\n x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])\n x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])\n x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])\n x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1])\n\n return x","function_tokens":["def","rel_shift","(","x",",","klen","=","-","1",")",":","x_size","=","tf",".","shape","(","x",")","x","=","tf",".","reshape","(","x",",","[","x_size","[","1","]",",","x_size","[","0","]",",","x_size","[","2","]",",","x_size","[","3","]","]",")","x","=","tf",".","slice","(","x",",","[","1",",","0",",","0",",","0","]",",","[","-","1",",","-","1",",","-","1",",","-","1","]",")","x","=","tf",".","reshape","(","x",",","[","x_size","[","0","]",",","x_size","[","1","]","-","1",",","x_size","[","2","]",",","x_size","[","3","]","]",")","x","=","tf",".","slice","(","x",",","[","0",",","0",",","0",",","0","]",",","[","-","1",",","klen",",","-","1",",","-","1","]",")","return","x"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L163-L172"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"_create_mask","parameters":"(qlen, mlen, dtype=tf.float32, same_length=False)","argument_list":"","return_statement":"return ret","docstring":"create causal attention mask.","docstring_summary":"create causal attention mask.","docstring_tokens":["create","causal","attention","mask","."],"function":"def _create_mask(qlen, mlen, dtype=tf.float32, same_length=False):\n \"\"\"create causal attention mask.\"\"\"\n attn_mask = tf.ones([qlen, qlen], dtype=dtype)\n mask_u = tf.matrix_band_part(attn_mask, 0, -1)\n mask_dia = tf.matrix_band_part(attn_mask, 0, 0)\n attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype)\n ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)\n if same_length:\n mask_l = tf.matrix_band_part(attn_mask, -1, 0)\n ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)\n\n return ret","function_tokens":["def","_create_mask","(","qlen",",","mlen",",","dtype","=","tf",".","float32",",","same_length","=","False",")",":","attn_mask","=","tf",".","ones","(","[","qlen",",","qlen","]",",","dtype","=","dtype",")","mask_u","=","tf",".","matrix_band_part","(","attn_mask",",","0",",","-","1",")","mask_dia","=","tf",".","matrix_band_part","(","attn_mask",",","0",",","0",")","attn_mask_pad","=","tf",".","zeros","(","[","qlen",",","mlen","]",",","dtype","=","dtype",")","ret","=","tf",".","concat","(","[","attn_mask_pad",",","mask_u","-","mask_dia","]",",","1",")","if","same_length",":","mask_l","=","tf",".","matrix_band_part","(","attn_mask",",","-","1",",","0",")","ret","=","tf",".","concat","(","[","ret","[",":",",",":","qlen","]","+","mask_l","-","mask_dia",",","ret","[",":",",","qlen",":","]","]",",","1",")","return","ret"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L175-L186"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"_cache_mem","parameters":"(curr_out, prev_mem, mem_len, 
reuse_len=None)","argument_list":"","return_statement":"return tf.stop_gradient(new_mem)","docstring":"cache hidden states into memory.","docstring_summary":"cache hidden states into memory.","docstring_tokens":["cache","hidden","states","into","memory","."],"function":"def _cache_mem(curr_out, prev_mem, mem_len, reuse_len=None):\n \"\"\"cache hidden states into memory.\"\"\"\n if mem_len is None or mem_len == 0:\n return None\n else:\n if reuse_len is not None and reuse_len > 0:\n curr_out = curr_out[:reuse_len]\n\n if prev_mem is None:\n new_mem = curr_out[-mem_len:]\n else:\n new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:]\n\n return tf.stop_gradient(new_mem)","function_tokens":["def","_cache_mem","(","curr_out",",","prev_mem",",","mem_len",",","reuse_len","=","None",")",":","if","mem_len","is","None","or","mem_len","==","0",":","return","None","else",":","if","reuse_len","is","not","None","and","reuse_len",">","0",":","curr_out","=","curr_out","[",":","reuse_len","]","if","prev_mem","is","None",":","new_mem","=","curr_out","[","-","mem_len",":","]","else",":","new_mem","=","tf",".","concat","(","[","prev_mem",",","curr_out","]",",","0",")","[","-","mem_len",":","]","return","tf",".","stop_gradient","(","new_mem",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L189-L202"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"relative_positional_encoding","parameters":"(qlen, klen, d_model, clamp_len, attn_type,\n bi_data, bsz=None, dtype=None)","argument_list":"","return_statement":"return pos_emb","docstring":"create relative positional encoding.","docstring_summary":"create relative positional encoding.","docstring_tokens":["create","relative","positional","encoding","."],"function":"def relative_positional_encoding(qlen, klen, d_model, clamp_len, attn_type,\n bi_data, bsz=None, dtype=None):\n \"\"\"create relative positional encoding.\"\"\"\n freq_seq = tf.range(0, d_model, 2.0)\n if dtype is not None and dtype != tf.float32:\n freq_seq = tf.cast(freq_seq, dtype=dtype)\n inv_freq = 1 \/ (10000 ** (freq_seq \/ d_model))\n\n if attn_type == 'bi':\n # beg, end = klen - 1, -qlen\n beg, end = klen, -qlen\n elif attn_type == 'uni':\n # beg, end = klen - 1, -1\n beg, end = klen, -1\n else:\n raise ValueError('Unknown `attn_type` {}.'.format(attn_type))\n\n if bi_data:\n fwd_pos_seq = tf.range(beg, end, -1.0)\n bwd_pos_seq = tf.range(-beg, -end, 1.0)\n\n if dtype is not None and dtype != tf.float32:\n fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)\n bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype)\n\n if clamp_len > 0:\n fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len)\n bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -clamp_len, clamp_len)\n\n if bsz is not None:\n # With bi_data, the batch size should be divisible by 2.\n assert bsz%2 == 0\n fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz\/\/2)\n bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq, bsz\/\/2)\n else:\n fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq)\n bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq)\n\n pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1)\n else:\n fwd_pos_seq = tf.range(beg, end, -1.0)\n if dtype is not None and dtype != tf.float32:\n fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype)\n if clamp_len > 0:\n fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, 
-clamp_len, clamp_len)\n pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz)\n\n return pos_emb","function_tokens":["def","relative_positional_encoding","(","qlen",",","klen",",","d_model",",","clamp_len",",","attn_type",",","bi_data",",","bsz","=","None",",","dtype","=","None",")",":","freq_seq","=","tf",".","range","(","0",",","d_model",",","2.0",")","if","dtype","is","not","None","and","dtype","!=","tf",".","float32",":","freq_seq","=","tf",".","cast","(","freq_seq",",","dtype","=","dtype",")","inv_freq","=","1","\/","(","10000","**","(","freq_seq","\/","d_model",")",")","if","attn_type","==","'bi'",":","# beg, end = klen - 1, -qlen","beg",",","end","=","klen",",","-","qlen","elif","attn_type","==","'uni'",":","# beg, end = klen - 1, -1","beg",",","end","=","klen",",","-","1","else",":","raise","ValueError","(","'Unknown `attn_type` {}.'",".","format","(","attn_type",")",")","if","bi_data",":","fwd_pos_seq","=","tf",".","range","(","beg",",","end",",","-","1.0",")","bwd_pos_seq","=","tf",".","range","(","-","beg",",","-","end",",","1.0",")","if","dtype","is","not","None","and","dtype","!=","tf",".","float32",":","fwd_pos_seq","=","tf",".","cast","(","fwd_pos_seq",",","dtype","=","dtype",")","bwd_pos_seq","=","tf",".","cast","(","bwd_pos_seq",",","dtype","=","dtype",")","if","clamp_len",">","0",":","fwd_pos_seq","=","tf",".","clip_by_value","(","fwd_pos_seq",",","-","clamp_len",",","clamp_len",")","bwd_pos_seq","=","tf",".","clip_by_value","(","bwd_pos_seq",",","-","clamp_len",",","clamp_len",")","if","bsz","is","not","None",":","# With bi_data, the batch size should be divisible by 2.","assert","bsz","%","2","==","0","fwd_pos_emb","=","positional_embedding","(","fwd_pos_seq",",","inv_freq",",","bsz","\/\/","2",")","bwd_pos_emb","=","positional_embedding","(","bwd_pos_seq",",","inv_freq",",","bsz","\/\/","2",")","else",":","fwd_pos_emb","=","positional_embedding","(","fwd_pos_seq",",","inv_freq",")","bwd_pos_emb","=","positional_embedding","(","bwd_pos_seq",",","inv_freq",")","pos_emb","=","tf",".","concat","(","[","fwd_pos_emb",",","bwd_pos_emb","]",",","axis","=","1",")","else",":","fwd_pos_seq","=","tf",".","range","(","beg",",","end",",","-","1.0",")","if","dtype","is","not","None","and","dtype","!=","tf",".","float32",":","fwd_pos_seq","=","tf",".","cast","(","fwd_pos_seq",",","dtype","=","dtype",")","if","clamp_len",">","0",":","fwd_pos_seq","=","tf",".","clip_by_value","(","fwd_pos_seq",",","-","clamp_len",",","clamp_len",")","pos_emb","=","positional_embedding","(","fwd_pos_seq",",","inv_freq",",","bsz",")","return","pos_emb"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L205-L252"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"multihead_attn","parameters":"(q, k, v, attn_mask, d_model, n_head, d_head, dropout,\n dropatt, is_training, kernel_initializer, residual=True,\n scope='abs_attn', reuse=None)","argument_list":"","return_statement":"return output","docstring":"Standard multi-head attention with absolute positional embedding.","docstring_summary":"Standard multi-head attention with absolute positional embedding.","docstring_tokens":["Standard","multi","-","head","attention","with","absolute","positional","embedding","."],"function":"def multihead_attn(q, k, v, attn_mask, d_model, n_head, d_head, dropout,\n dropatt, is_training, kernel_initializer, residual=True,\n 
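`relative_positional_encoding` builds inverse frequencies and a descending distance sequence, then calls `positional_embedding`, which is defined elsewhere in modeling.py; the sinusoid form below is an assumption based on the standard Transformer-XL sinusoid, not taken from this file:

```python
import numpy as np

def positional_embedding(pos_seq, inv_freq):
    # Assumed form: concat(sin, cos) of the outer product pos x freq.
    sinusoid = np.einsum('i,d->id', pos_seq, inv_freq)
    return np.concatenate([np.sin(sinusoid), np.cos(sinusoid)], axis=-1)

qlen, klen, d_model = 3, 5, 8
freq_seq = np.arange(0, d_model, 2.0)
inv_freq = 1 / (10000 ** (freq_seq / d_model))

# attn_type == 'bi': relative distances run from klen down to -qlen + 1.
fwd_pos_seq = np.arange(klen, -qlen, -1.0)
pos_emb = positional_embedding(fwd_pos_seq, inv_freq)
print(pos_emb.shape)  # (klen + qlen, d_model)
```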
scope='abs_attn', reuse=None):\n \"\"\"Standard multi-head attention with absolute positional embedding.\"\"\"\n\n scale = 1 \/ (d_head ** 0.5)\n with tf.variable_scope(scope, reuse=reuse):\n # attention heads\n q_head = head_projection(\n q, d_model, n_head, d_head, kernel_initializer, 'q')\n k_head = head_projection(\n k, d_model, n_head, d_head, kernel_initializer, 'k')\n v_head = head_projection(\n v, d_model, n_head, d_head, kernel_initializer, 'v')\n\n # attention vector\n attn_vec = abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt,\n is_training, scale)\n\n # post processing\n output = post_attention(v, attn_vec, d_model, n_head, d_head, dropout,\n is_training, kernel_initializer, residual)\n\n return output","function_tokens":["def","multihead_attn","(","q",",","k",",","v",",","attn_mask",",","d_model",",","n_head",",","d_head",",","dropout",",","dropatt",",","is_training",",","kernel_initializer",",","residual","=","True",",","scope","=","'abs_attn'",",","reuse","=","None",")",":","scale","=","1","\/","(","d_head","**","0.5",")","with","tf",".","variable_scope","(","scope",",","reuse","=","reuse",")",":","# attention heads","q_head","=","head_projection","(","q",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'q'",")","k_head","=","head_projection","(","k",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'k'",")","v_head","=","head_projection","(","v",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'v'",")","# attention vector","attn_vec","=","abs_attn_core","(","q_head",",","k_head",",","v_head",",","attn_mask",",","dropatt",",","is_training",",","scale",")","# post processing","output","=","post_attention","(","v",",","attn_vec",",","d_model",",","n_head",",","d_head",",","dropout",",","is_training",",","kernel_initializer",",","residual",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L255-L278"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"rel_multihead_attn","parameters":"(h, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed,\n attn_mask, mems, d_model, n_head, d_head, dropout,\n dropatt, is_training, kernel_initializer,\n scope='rel_attn', reuse=None)","argument_list":"","return_statement":"return output","docstring":"Multi-head attention with relative positional encoding.","docstring_summary":"Multi-head attention with relative positional encoding.","docstring_tokens":["Multi","-","head","attention","with","relative","positional","encoding","."],"function":"def rel_multihead_attn(h, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed,\n attn_mask, mems, d_model, n_head, d_head, dropout,\n dropatt, is_training, kernel_initializer,\n scope='rel_attn', reuse=None):\n \"\"\"Multi-head attention with relative positional encoding.\"\"\"\n\n scale = 1 \/ (d_head ** 0.5)\n with tf.variable_scope(scope, reuse=reuse):\n if mems is not None and mems.shape.ndims > 1:\n cat = tf.concat([mems, h], 0)\n else:\n cat = h\n\n # content heads\n q_head_h = head_projection(\n h, d_model, n_head, d_head, kernel_initializer, 'q')\n k_head_h = head_projection(\n cat, d_model, n_head, d_head, kernel_initializer, 'k')\n v_head_h = head_projection(\n cat, d_model, n_head, d_head, kernel_initializer, 'v')\n\n # positional heads\n k_head_r = head_projection(\n r, d_model, n_head, d_head, kernel_initializer, 
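`multihead_attn` and its relatives all rely on `head_projection`'s `'ibh,hnd->ibnd'` einsum to split `d_model` into `(n_head, d_head)`, and `post_attention`'s `'ibnd,hnd->ibh'` to merge it back. The round trip in NumPy, with the attention core stubbed out:

```python
import numpy as np

rng = np.random.default_rng(0)
seq, bsz, d_model, n_head, d_head = 4, 2, 8, 2, 4
h = rng.normal(size=(seq, bsz, d_model))
proj_q = rng.normal(size=(d_model, n_head, d_head))  # head_projection kernel
proj_o = rng.normal(size=(d_model, n_head, d_head))  # post-attention 'o/kernel'

q_head = np.einsum('ibh,hnd->ibnd', h, proj_q)       # split into heads
attn_vec = q_head                                    # stand-in for abs_attn_core
attn_out = np.einsum('ibnd,hnd->ibh', attn_vec, proj_o)
print(attn_out.shape)                                # back to (seq, bsz, d_model)
```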
'r')\n\n # core attention ops\n attn_vec = rel_attn_core(\n q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,\n r_r_bias, r_s_bias, attn_mask, dropatt, is_training, scale)\n\n # post processing\n output = post_attention(h, attn_vec, d_model, n_head, d_head, dropout,\n is_training, kernel_initializer)\n\n return output","function_tokens":["def","rel_multihead_attn","(","h",",","r",",","r_w_bias",",","r_r_bias",",","seg_mat",",","r_s_bias",",","seg_embed",",","attn_mask",",","mems",",","d_model",",","n_head",",","d_head",",","dropout",",","dropatt",",","is_training",",","kernel_initializer",",","scope","=","'rel_attn'",",","reuse","=","None",")",":","scale","=","1","\/","(","d_head","**","0.5",")","with","tf",".","variable_scope","(","scope",",","reuse","=","reuse",")",":","if","mems","is","not","None","and","mems",".","shape",".","ndims",">","1",":","cat","=","tf",".","concat","(","[","mems",",","h","]",",","0",")","else",":","cat","=","h","# content heads","q_head_h","=","head_projection","(","h",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'q'",")","k_head_h","=","head_projection","(","cat",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'k'",")","v_head_h","=","head_projection","(","cat",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'v'",")","# positional heads","k_head_r","=","head_projection","(","r",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'r'",")","# core attention ops","attn_vec","=","rel_attn_core","(","q_head_h",",","k_head_h",",","v_head_h",",","k_head_r",",","seg_embed",",","seg_mat",",","r_w_bias",",","r_r_bias",",","r_s_bias",",","attn_mask",",","dropatt",",","is_training",",","scale",")","# post processing","output","=","post_attention","(","h",",","attn_vec",",","d_model",",","n_head",",","d_head",",","dropout",",","is_training",",","kernel_initializer",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L282-L316"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"two_stream_rel_attn","parameters":"(h, g, r, mems, r_w_bias, r_r_bias, seg_mat, r_s_bias,\n seg_embed, attn_mask_h, attn_mask_g, target_mapping,\n d_model, n_head, d_head, dropout, dropatt, is_training,\n kernel_initializer, scope='rel_attn')","argument_list":"","return_statement":"","docstring":"Two-stream attention with relative positional encoding.","docstring_summary":"Two-stream attention with relative positional encoding.","docstring_tokens":["Two","-","stream","attention","with","relative","positional","encoding","."],"function":"def two_stream_rel_attn(h, g, r, mems, r_w_bias, r_r_bias, seg_mat, r_s_bias,\n seg_embed, attn_mask_h, attn_mask_g, target_mapping,\n d_model, n_head, d_head, dropout, dropatt, is_training,\n kernel_initializer, scope='rel_attn'):\n \"\"\"Two-stream attention with relative positional encoding.\"\"\"\n\n scale = 1 \/ (d_head ** 0.5)\n with tf.variable_scope(scope, reuse=False):\n\n # content based attention score\n if mems is not None and mems.shape.ndims > 1:\n cat = tf.concat([mems, h], 0)\n else:\n cat = h\n\n # content-based key head\n k_head_h = head_projection(\n cat, d_model, n_head, d_head, kernel_initializer, 'k')\n\n # content-based value head\n v_head_h = head_projection(\n cat, d_model, n_head, d_head, kernel_initializer, 'v')\n\n # 
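In `rel_multihead_attn`, cached memory is prepended only on the key/value side, so queries span `qlen` positions while keys and values span `mlen + qlen` (`cat`); the memory itself is never used as a query:

```python
import numpy as np

mlen, qlen, bsz, d_model = 2, 3, 1, 4
mems = np.zeros((mlen, bsz, d_model))     # cached hidden states
h = np.ones((qlen, bsz, d_model))         # current segment

cat = np.concatenate([mems, h], axis=0)   # key/value input
print(h.shape[0], cat.shape[0])           # queries: 3, keys/values: 5
```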
position-based key head\n k_head_r = head_projection(\n r, d_model, n_head, d_head, kernel_initializer, 'r')\n\n ##### h-stream\n # content-stream query head\n q_head_h = head_projection(\n h, d_model, n_head, d_head, kernel_initializer, 'q')\n\n # core attention ops\n attn_vec_h = rel_attn_core(\n q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,\n r_r_bias, r_s_bias, attn_mask_h, dropatt, is_training, scale)\n\n # post processing\n output_h = post_attention(h, attn_vec_h, d_model, n_head, d_head, dropout,\n is_training, kernel_initializer)\n\n with tf.variable_scope(scope, reuse=True):\n ##### g-stream\n # query-stream query head\n q_head_g = head_projection(\n g, d_model, n_head, d_head, kernel_initializer, 'q')\n\n # core attention ops\n if target_mapping is not None:\n q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)\n attn_vec_g = rel_attn_core(\n q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,\n r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale)\n attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)\n else:\n attn_vec_g = rel_attn_core(\n q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias,\n r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale)\n\n # post processing\n output_g = post_attention(g, attn_vec_g, d_model, n_head, d_head, dropout,\n is_training, kernel_initializer)\n\n return output_h, output_g","function_tokens":["def","two_stream_rel_attn","(","h",",","g",",","r",",","mems",",","r_w_bias",",","r_r_bias",",","seg_mat",",","r_s_bias",",","seg_embed",",","attn_mask_h",",","attn_mask_g",",","target_mapping",",","d_model",",","n_head",",","d_head",",","dropout",",","dropatt",",","is_training",",","kernel_initializer",",","scope","=","'rel_attn'",")",":","scale","=","1","\/","(","d_head","**","0.5",")","with","tf",".","variable_scope","(","scope",",","reuse","=","False",")",":","# content based attention score","if","mems","is","not","None","and","mems",".","shape",".","ndims",">","1",":","cat","=","tf",".","concat","(","[","mems",",","h","]",",","0",")","else",":","cat","=","h","# content-based key head","k_head_h","=","head_projection","(","cat",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'k'",")","# content-based value head","v_head_h","=","head_projection","(","cat",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'v'",")","# position-based key head","k_head_r","=","head_projection","(","r",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'r'",")","##### h-stream","# content-stream query head","q_head_h","=","head_projection","(","h",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'q'",")","# core attention ops","attn_vec_h","=","rel_attn_core","(","q_head_h",",","k_head_h",",","v_head_h",",","k_head_r",",","seg_embed",",","seg_mat",",","r_w_bias",",","r_r_bias",",","r_s_bias",",","attn_mask_h",",","dropatt",",","is_training",",","scale",")","# post processing","output_h","=","post_attention","(","h",",","attn_vec_h",",","d_model",",","n_head",",","d_head",",","dropout",",","is_training",",","kernel_initializer",")","with","tf",".","variable_scope","(","scope",",","reuse","=","True",")",":","##### g-stream","# query-stream query head","q_head_g","=","head_projection","(","g",",","d_model",",","n_head",",","d_head",",","kernel_initializer",",","'q'",")","# core attention 
ops","if","target_mapping","is","not","None",":","q_head_g","=","tf",".","einsum","(","'mbnd,mlb->lbnd'",",","q_head_g",",","target_mapping",")","attn_vec_g","=","rel_attn_core","(","q_head_g",",","k_head_h",",","v_head_h",",","k_head_r",",","seg_embed",",","seg_mat",",","r_w_bias",",","r_r_bias",",","r_s_bias",",","attn_mask_g",",","dropatt",",","is_training",",","scale",")","attn_vec_g","=","tf",".","einsum","(","'lbnd,mlb->mbnd'",",","attn_vec_g",",","target_mapping",")","else",":","attn_vec_g","=","rel_attn_core","(","q_head_g",",","k_head_h",",","v_head_h",",","k_head_r",",","seg_embed",",","seg_mat",",","r_w_bias",",","r_r_bias",",","r_s_bias",",","attn_mask_g",",","dropatt",",","is_training",",","scale",")","# post processing","output_g","=","post_attention","(","g",",","attn_vec_g",",","d_model",",","n_head",",","d_head",",","dropout",",","is_training",",","kernel_initializer",")","return","output_h",",","output_g"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L319-L382"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"transformer_xl","parameters":"(inp_k, n_token, n_layer, d_model, n_head,\n d_head, d_inner, dropout, dropatt, attn_type,\n bi_data, initializer, is_training, mem_len=None,\n inp_q=None, mems=None,\n same_length=False, clamp_len=-1, untie_r=False,\n use_tpu=True, input_mask=None,\n perm_mask=None, seg_id=None, reuse_len=None,\n ff_activation='relu', target_mapping=None,\n use_bfloat16=False, scope='transformer', **kwargs)","argument_list":"","return_statement":"","docstring":"Defines a Transformer-XL computation graph with additional\n support for XLNet.\n\n Args:\n\n inp_k: int32 Tensor in shape [len, bsz], the input token IDs.\n seg_id: int32 Tensor in shape [len, bsz], the input segment IDs.\n input_mask: float32 Tensor in shape [len, bsz], the input mask.\n 0 for real tokens and 1 for padding.\n mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory\n from previous batches. 
The length of the list equals n_layer.\n If None, no memory is used.\n perm_mask: float32 Tensor in shape [len, len, bsz].\n If perm_mask[i, j, k] = 0, i attend to j in batch k;\n if perm_mask[i, j, k] = 1, i does not attend to j in batch k.\n If None, each position attends to all the others.\n target_mapping: float32 Tensor in shape [num_predict, len, bsz].\n If target_mapping[i, j, k] = 1, the i-th predict in batch k is\n on the j-th token.\n Only used during pretraining for partial prediction.\n Set to None during finetuning.\n inp_q: float32 Tensor in shape [len, bsz].\n 1 for tokens with losses and 0 for tokens without losses.\n Only used during pretraining for two-stream attention.\n Set to None during finetuning.\n\n n_layer: int, the number of layers.\n d_model: int, the hidden size.\n n_head: int, the number of attention heads.\n d_head: int, the dimension size of each attention head.\n d_inner: int, the hidden size in feed-forward layers.\n ff_activation: str, \"relu\" or \"gelu\".\n untie_r: bool, whether to untie the biases in attention.\n n_token: int, the vocab size.\n\n is_training: bool, whether in training mode.\n use_tpu: bool, whether TPUs are used.\n use_bfloat16: bool, use bfloat16 instead of float32.\n dropout: float, dropout rate.\n dropatt: float, dropout rate on attention probabilities.\n init: str, the initialization scheme, either \"normal\" or \"uniform\".\n init_range: float, initialize the parameters with a uniform distribution\n in [-init_range, init_range]. Only effective when init=\"uniform\".\n init_std: float, initialize the parameters with a normal distribution\n with mean 0 and stddev init_std. Only effective when init=\"normal\".\n mem_len: int, the number of tokens to cache.\n reuse_len: int, the number of tokens in the currect batch to be cached\n and reused in the future.\n bi_data: bool, whether to use bidirectional input pipeline.\n Usually set to True during pretraining and False during finetuning.\n clamp_len: int, clamp all relative distances larger than clamp_len.\n -1 means no clamping.\n same_length: bool, whether to use the same attention length for each token.\n summary_type: str, \"last\", \"first\", \"mean\", or \"attn\". The method\n to pool the input to get a vector representation.\n initializer: A tf initializer.\n scope: scope name for the computation graph.","docstring_summary":"Defines a Transformer-XL computation graph with additional\n support for XLNet.","docstring_tokens":["Defines","a","Transformer","-","XL","computation","graph","with","additional","support","for","XLNet","."],"function":"def transformer_xl(inp_k, n_token, n_layer, d_model, n_head,\n d_head, d_inner, dropout, dropatt, attn_type,\n bi_data, initializer, is_training, mem_len=None,\n inp_q=None, mems=None,\n same_length=False, clamp_len=-1, untie_r=False,\n use_tpu=True, input_mask=None,\n perm_mask=None, seg_id=None, reuse_len=None,\n ff_activation='relu', target_mapping=None,\n use_bfloat16=False, scope='transformer', **kwargs):\n \"\"\"\n Defines a Transformer-XL computation graph with additional\n support for XLNet.\n\n Args:\n\n inp_k: int32 Tensor in shape [len, bsz], the input token IDs.\n seg_id: int32 Tensor in shape [len, bsz], the input segment IDs.\n input_mask: float32 Tensor in shape [len, bsz], the input mask.\n 0 for real tokens and 1 for padding.\n mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory\n from previous batches. 
The length of the list equals n_layer.\n If None, no memory is used.\n perm_mask: float32 Tensor in shape [len, len, bsz].\n If perm_mask[i, j, k] = 0, i attend to j in batch k;\n if perm_mask[i, j, k] = 1, i does not attend to j in batch k.\n If None, each position attends to all the others.\n target_mapping: float32 Tensor in shape [num_predict, len, bsz].\n If target_mapping[i, j, k] = 1, the i-th predict in batch k is\n on the j-th token.\n Only used during pretraining for partial prediction.\n Set to None during finetuning.\n inp_q: float32 Tensor in shape [len, bsz].\n 1 for tokens with losses and 0 for tokens without losses.\n Only used during pretraining for two-stream attention.\n Set to None during finetuning.\n\n n_layer: int, the number of layers.\n d_model: int, the hidden size.\n n_head: int, the number of attention heads.\n d_head: int, the dimension size of each attention head.\n d_inner: int, the hidden size in feed-forward layers.\n ff_activation: str, \"relu\" or \"gelu\".\n untie_r: bool, whether to untie the biases in attention.\n n_token: int, the vocab size.\n\n is_training: bool, whether in training mode.\n use_tpu: bool, whether TPUs are used.\n use_bfloat16: bool, use bfloat16 instead of float32.\n dropout: float, dropout rate.\n dropatt: float, dropout rate on attention probabilities.\n init: str, the initialization scheme, either \"normal\" or \"uniform\".\n init_range: float, initialize the parameters with a uniform distribution\n in [-init_range, init_range]. Only effective when init=\"uniform\".\n init_std: float, initialize the parameters with a normal distribution\n with mean 0 and stddev init_std. Only effective when init=\"normal\".\n mem_len: int, the number of tokens to cache.\n reuse_len: int, the number of tokens in the currect batch to be cached\n and reused in the future.\n bi_data: bool, whether to use bidirectional input pipeline.\n Usually set to True during pretraining and False during finetuning.\n clamp_len: int, clamp all relative distances larger than clamp_len.\n -1 means no clamping.\n same_length: bool, whether to use the same attention length for each token.\n summary_type: str, \"last\", \"first\", \"mean\", or \"attn\". 
The method\n to pool the input to get a vector representation.\n initializer: A tf initializer.\n scope: scope name for the computation graph.\n \"\"\"\n tf.logging.info('memory input {}'.format(mems))\n tf_float = tf.bfloat16 if use_bfloat16 else tf.float32\n tf.logging.info('Use float type {}'.format(tf_float))\n\n new_mems = []\n with tf.variable_scope(scope):\n if untie_r:\n r_w_bias = tf.get_variable('r_w_bias', [n_layer, n_head, d_head],\n dtype=tf_float, initializer=initializer)\n r_r_bias = tf.get_variable('r_r_bias', [n_layer, n_head, d_head],\n dtype=tf_float, initializer=initializer)\n else:\n r_w_bias = tf.get_variable('r_w_bias', [n_head, d_head],\n dtype=tf_float, initializer=initializer)\n r_r_bias = tf.get_variable('r_r_bias', [n_head, d_head],\n dtype=tf_float, initializer=initializer)\n\n bsz = tf.shape(inp_k)[1]\n qlen = tf.shape(inp_k)[0]\n mlen = tf.shape(mems[0])[0] if mems is not None else 0\n klen = mlen + qlen\n\n ##### Attention mask\n # causal attention mask\n if attn_type == 'uni':\n attn_mask = _create_mask(qlen, mlen, tf_float, same_length)\n attn_mask = attn_mask[:, :, None, None]\n elif attn_type == 'bi':\n attn_mask = None\n else:\n raise ValueError('Unsupported attention type: {}'.format(attn_type))\n\n # data mask: input mask & perm mask\n if input_mask is not None and perm_mask is not None:\n data_mask = input_mask[None] + perm_mask\n elif input_mask is not None and perm_mask is None:\n data_mask = input_mask[None]\n elif input_mask is None and perm_mask is not None:\n data_mask = perm_mask\n else:\n data_mask = None\n\n if data_mask is not None:\n # all mems can be attended to\n mems_mask = tf.zeros([tf.shape(data_mask)[0], mlen, bsz],\n dtype=tf_float)\n data_mask = tf.concat([mems_mask, data_mask], 1)\n if attn_mask is None:\n attn_mask = data_mask[:, :, :, None]\n else:\n attn_mask += data_mask[:, :, :, None]\n\n if attn_mask is not None:\n attn_mask = tf.cast(attn_mask > 0, dtype=tf_float)\n\n if attn_mask is not None:\n non_tgt_mask = -tf.eye(qlen, dtype=tf_float)\n non_tgt_mask = tf.concat([tf.zeros([qlen, mlen], dtype=tf_float),\n non_tgt_mask], axis=-1)\n non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0,\n dtype=tf_float)\n else:\n non_tgt_mask = None\n\n ##### Word embedding\n word_emb_k, lookup_table = embedding_lookup(\n x=inp_k,\n n_token=n_token,\n d_embed=d_model,\n initializer=initializer,\n use_tpu=use_tpu,\n dtype=tf_float,\n scope='word_embedding')\n\n if inp_q is not None:\n with tf.variable_scope('mask_emb'):\n mask_emb = tf.get_variable('mask_emb', [1, 1, d_model], dtype=tf_float)\n if target_mapping is not None:\n word_emb_q = tf.tile(mask_emb, [tf.shape(target_mapping)[0], bsz, 1])\n else:\n inp_q_ext = inp_q[:, :, None]\n word_emb_q = inp_q_ext * mask_emb + (1 - inp_q_ext) * word_emb_k\n output_h = tf.layers.dropout(word_emb_k, dropout, training=is_training)\n if inp_q is not None:\n output_g = tf.layers.dropout(word_emb_q, dropout, training=is_training)\n\n ##### Segment embedding\n if seg_id is not None:\n if untie_r:\n r_s_bias = tf.get_variable('r_s_bias', [n_layer, n_head, d_head],\n dtype=tf_float, initializer=initializer)\n else:\n # default case (tie)\n r_s_bias = tf.get_variable('r_s_bias', [n_head, d_head],\n dtype=tf_float, initializer=initializer)\n\n seg_embed = tf.get_variable('seg_embed', [n_layer, 2, n_head, d_head],\n dtype=tf_float, initializer=initializer)\n\n # Convert `seg_id` to one-hot `seg_mat`\n mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32)\n cat_ids = tf.concat([mem_pad, seg_id], 
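The mask plumbing in `transformer_xl` adds the causal mask to the data mask, re-binarizes with a `> 0` cast, and then builds `non_tgt_mask` by subtracting an identity block so that every position may attend to itself in the content stream. In 2-D (the real masks carry extra batch/head axes; 1 means "may not attend"):

```python
import numpy as np

qlen, mlen = 3, 2

# causal mask: strictly upper triangle over the qlen x qlen block,
# with mlen all-zero columns prepended (memory is always visible).
attn_mask = np.concatenate(
    [np.zeros((qlen, mlen)), np.triu(np.ones((qlen, qlen)), k=1)], axis=1)

data_mask = np.zeros((qlen, qlen + mlen))
data_mask[:, -1] = 1.0                             # e.g. last token is padding

combined = ((attn_mask + data_mask) > 0).astype(np.float32)

non_tgt = np.concatenate([np.zeros((qlen, mlen)), -np.eye(qlen)], axis=1)
non_tgt_mask = ((combined + non_tgt) > 0).astype(np.float32)
print(non_tgt_mask)  # each row's own diagonal is open even where combined == 1
```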
0)\n\n # `1` indicates not in the same segment [qlen x klen x bsz]\n seg_mat = tf.cast(\n tf.logical_not(tf.equal(seg_id[:, None], cat_ids[None, :])),\n tf.int32)\n seg_mat = tf.one_hot(seg_mat, 2, dtype=tf_float)\n else:\n seg_mat = None\n\n ##### Positional encoding\n pos_emb = relative_positional_encoding(\n qlen, klen, d_model, clamp_len, attn_type, bi_data,\n bsz=bsz, dtype=tf_float)\n pos_emb = tf.layers.dropout(pos_emb, dropout, training=is_training)\n\n ##### Attention layers\n if mems is None:\n mems = [None] * n_layer\n\n for i in range(n_layer):\n # cache new mems\n new_mems.append(_cache_mem(output_h, mems[i], mem_len, reuse_len))\n\n # segment bias\n if seg_id is None:\n r_s_bias_i = None\n seg_embed_i = None\n else:\n r_s_bias_i = r_s_bias if not untie_r else r_s_bias[i]\n seg_embed_i = seg_embed[i]\n\n with tf.variable_scope('layer_{}'.format(i)):\n if inp_q is not None:\n output_h, output_g = two_stream_rel_attn(\n h=output_h,\n g=output_g,\n r=pos_emb,\n r_w_bias=r_w_bias if not untie_r else r_w_bias[i],\n r_r_bias=r_r_bias if not untie_r else r_r_bias[i],\n seg_mat=seg_mat,\n r_s_bias=r_s_bias_i,\n seg_embed=seg_embed_i,\n attn_mask_h=non_tgt_mask,\n attn_mask_g=attn_mask,\n mems=mems[i],\n target_mapping=target_mapping,\n d_model=d_model,\n n_head=n_head,\n d_head=d_head,\n dropout=dropout,\n dropatt=dropatt,\n is_training=is_training,\n kernel_initializer=initializer)\n reuse = True\n else:\n reuse = False\n\n output_h = rel_multihead_attn(\n h=output_h,\n r=pos_emb,\n r_w_bias=r_w_bias if not untie_r else r_w_bias[i],\n r_r_bias=r_r_bias if not untie_r else r_r_bias[i],\n seg_mat=seg_mat,\n r_s_bias=r_s_bias_i,\n seg_embed=seg_embed_i,\n attn_mask=non_tgt_mask,\n mems=mems[i],\n d_model=d_model,\n n_head=n_head,\n d_head=d_head,\n dropout=dropout,\n dropatt=dropatt,\n is_training=is_training,\n kernel_initializer=initializer,\n reuse=reuse)\n\n if inp_q is not None:\n output_g = positionwise_ffn(\n inp=output_g,\n d_model=d_model,\n d_inner=d_inner,\n dropout=dropout,\n kernel_initializer=initializer,\n activation_type=ff_activation,\n is_training=is_training)\n\n output_h = positionwise_ffn(\n inp=output_h,\n d_model=d_model,\n d_inner=d_inner,\n dropout=dropout,\n kernel_initializer=initializer,\n activation_type=ff_activation,\n is_training=is_training,\n reuse=reuse)\n\n if inp_q is not None:\n output = tf.layers.dropout(output_g, dropout, training=is_training)\n else:\n output = tf.layers.dropout(output_h, dropout, training=is_training)\n\n return output, new_mems, lookup_table","function_tokens":["def","transformer_xl","(","inp_k",",","n_token",",","n_layer",",","d_model",",","n_head",",","d_head",",","d_inner",",","dropout",",","dropatt",",","attn_type",",","bi_data",",","initializer",",","is_training",",","mem_len","=","None",",","inp_q","=","None",",","mems","=","None",",","same_length","=","False",",","clamp_len","=","-","1",",","untie_r","=","False",",","use_tpu","=","True",",","input_mask","=","None",",","perm_mask","=","None",",","seg_id","=","None",",","reuse_len","=","None",",","ff_activation","=","'relu'",",","target_mapping","=","None",",","use_bfloat16","=","False",",","scope","=","'transformer'",",","*","*","kwargs",")",":","tf",".","logging",".","info","(","'memory input {}'",".","format","(","mems",")",")","tf_float","=","tf",".","bfloat16","if","use_bfloat16","else","tf",".","float32","tf",".","logging",".","info","(","'Use float type 
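The segment matrix is built by padding the memory span with segment id 0 and then one-hot encoding, per (query, key) pair, whether the two positions fall in the same segment (class 0) or different segments (class 1). A NumPy version of the same three steps:

```python
import numpy as np

mlen, qlen, bsz = 1, 3, 1
seg_id = np.array([[0], [0], [1]])                   # [qlen, bsz]

mem_pad = np.zeros((mlen, bsz), dtype=np.int64)
cat_ids = np.concatenate([mem_pad, seg_id], axis=0)  # [qlen + mlen, bsz]

# 1 where query and key segments differ: [qlen, klen, bsz]
diff = (seg_id[:, None] != cat_ids[None, :]).astype(np.int64)
seg_mat = np.eye(2)[diff]                            # [qlen, klen, bsz, 2]
print(diff[:, :, 0])
# [[0 0 0 1]
#  [0 0 0 1]
#  [1 1 1 0]]
```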
{}'",".","format","(","tf_float",")",")","new_mems","=","[","]","with","tf",".","variable_scope","(","scope",")",":","if","untie_r",":","r_w_bias","=","tf",".","get_variable","(","'r_w_bias'",",","[","n_layer",",","n_head",",","d_head","]",",","dtype","=","tf_float",",","initializer","=","initializer",")","r_r_bias","=","tf",".","get_variable","(","'r_r_bias'",",","[","n_layer",",","n_head",",","d_head","]",",","dtype","=","tf_float",",","initializer","=","initializer",")","else",":","r_w_bias","=","tf",".","get_variable","(","'r_w_bias'",",","[","n_head",",","d_head","]",",","dtype","=","tf_float",",","initializer","=","initializer",")","r_r_bias","=","tf",".","get_variable","(","'r_r_bias'",",","[","n_head",",","d_head","]",",","dtype","=","tf_float",",","initializer","=","initializer",")","bsz","=","tf",".","shape","(","inp_k",")","[","1","]","qlen","=","tf",".","shape","(","inp_k",")","[","0","]","mlen","=","tf",".","shape","(","mems","[","0","]",")","[","0","]","if","mems","is","not","None","else","0","klen","=","mlen","+","qlen","##### Attention mask","# causal attention mask","if","attn_type","==","'uni'",":","attn_mask","=","_create_mask","(","qlen",",","mlen",",","tf_float",",","same_length",")","attn_mask","=","attn_mask","[",":",",",":",",","None",",","None","]","elif","attn_type","==","'bi'",":","attn_mask","=","None","else",":","raise","ValueError","(","'Unsupported attention type: {}'",".","format","(","attn_type",")",")","# data mask: input mask & perm mask","if","input_mask","is","not","None","and","perm_mask","is","not","None",":","data_mask","=","input_mask","[","None","]","+","perm_mask","elif","input_mask","is","not","None","and","perm_mask","is","None",":","data_mask","=","input_mask","[","None","]","elif","input_mask","is","None","and","perm_mask","is","not","None",":","data_mask","=","perm_mask","else",":","data_mask","=","None","if","data_mask","is","not","None",":","# all mems can be attended to","mems_mask","=","tf",".","zeros","(","[","tf",".","shape","(","data_mask",")","[","0","]",",","mlen",",","bsz","]",",","dtype","=","tf_float",")","data_mask","=","tf",".","concat","(","[","mems_mask",",","data_mask","]",",","1",")","if","attn_mask","is","None",":","attn_mask","=","data_mask","[",":",",",":",",",":",",","None","]","else",":","attn_mask","+=","data_mask","[",":",",",":",",",":",",","None","]","if","attn_mask","is","not","None",":","attn_mask","=","tf",".","cast","(","attn_mask",">","0",",","dtype","=","tf_float",")","if","attn_mask","is","not","None",":","non_tgt_mask","=","-","tf",".","eye","(","qlen",",","dtype","=","tf_float",")","non_tgt_mask","=","tf",".","concat","(","[","tf",".","zeros","(","[","qlen",",","mlen","]",",","dtype","=","tf_float",")",",","non_tgt_mask","]",",","axis","=","-","1",")","non_tgt_mask","=","tf",".","cast","(","(","attn_mask","+","non_tgt_mask","[",":",",",":",",","None",",","None","]",")",">","0",",","dtype","=","tf_float",")","else",":","non_tgt_mask","=","None","##### Word 
embedding","word_emb_k",",","lookup_table","=","embedding_lookup","(","x","=","inp_k",",","n_token","=","n_token",",","d_embed","=","d_model",",","initializer","=","initializer",",","use_tpu","=","use_tpu",",","dtype","=","tf_float",",","scope","=","'word_embedding'",")","if","inp_q","is","not","None",":","with","tf",".","variable_scope","(","'mask_emb'",")",":","mask_emb","=","tf",".","get_variable","(","'mask_emb'",",","[","1",",","1",",","d_model","]",",","dtype","=","tf_float",")","if","target_mapping","is","not","None",":","word_emb_q","=","tf",".","tile","(","mask_emb",",","[","tf",".","shape","(","target_mapping",")","[","0","]",",","bsz",",","1","]",")","else",":","inp_q_ext","=","inp_q","[",":",",",":",",","None","]","word_emb_q","=","inp_q_ext","*","mask_emb","+","(","1","-","inp_q_ext",")","*","word_emb_k","output_h","=","tf",".","layers",".","dropout","(","word_emb_k",",","dropout",",","training","=","is_training",")","if","inp_q","is","not","None",":","output_g","=","tf",".","layers",".","dropout","(","word_emb_q",",","dropout",",","training","=","is_training",")","##### Segment embedding","if","seg_id","is","not","None",":","if","untie_r",":","r_s_bias","=","tf",".","get_variable","(","'r_s_bias'",",","[","n_layer",",","n_head",",","d_head","]",",","dtype","=","tf_float",",","initializer","=","initializer",")","else",":","# default case (tie)","r_s_bias","=","tf",".","get_variable","(","'r_s_bias'",",","[","n_head",",","d_head","]",",","dtype","=","tf_float",",","initializer","=","initializer",")","seg_embed","=","tf",".","get_variable","(","'seg_embed'",",","[","n_layer",",","2",",","n_head",",","d_head","]",",","dtype","=","tf_float",",","initializer","=","initializer",")","# Convert `seg_id` to one-hot `seg_mat`","mem_pad","=","tf",".","zeros","(","[","mlen",",","bsz","]",",","dtype","=","tf",".","int32",")","cat_ids","=","tf",".","concat","(","[","mem_pad",",","seg_id","]",",","0",")","# `1` indicates not in the same segment [qlen x klen x bsz]","seg_mat","=","tf",".","cast","(","tf",".","logical_not","(","tf",".","equal","(","seg_id","[",":",",","None","]",",","cat_ids","[","None",",",":","]",")",")",",","tf",".","int32",")","seg_mat","=","tf",".","one_hot","(","seg_mat",",","2",",","dtype","=","tf_float",")","else",":","seg_mat","=","None","##### Positional encoding","pos_emb","=","relative_positional_encoding","(","qlen",",","klen",",","d_model",",","clamp_len",",","attn_type",",","bi_data",",","bsz","=","bsz",",","dtype","=","tf_float",")","pos_emb","=","tf",".","layers",".","dropout","(","pos_emb",",","dropout",",","training","=","is_training",")","##### Attention layers","if","mems","is","None",":","mems","=","[","None","]","*","n_layer","for","i","in","range","(","n_layer",")",":","# cache new mems","new_mems",".","append","(","_cache_mem","(","output_h",",","mems","[","i","]",",","mem_len",",","reuse_len",")",")","# segment 
bias","if","seg_id","is","None",":","r_s_bias_i","=","None","seg_embed_i","=","None","else",":","r_s_bias_i","=","r_s_bias","if","not","untie_r","else","r_s_bias","[","i","]","seg_embed_i","=","seg_embed","[","i","]","with","tf",".","variable_scope","(","'layer_{}'",".","format","(","i",")",")",":","if","inp_q","is","not","None",":","output_h",",","output_g","=","two_stream_rel_attn","(","h","=","output_h",",","g","=","output_g",",","r","=","pos_emb",",","r_w_bias","=","r_w_bias","if","not","untie_r","else","r_w_bias","[","i","]",",","r_r_bias","=","r_r_bias","if","not","untie_r","else","r_r_bias","[","i","]",",","seg_mat","=","seg_mat",",","r_s_bias","=","r_s_bias_i",",","seg_embed","=","seg_embed_i",",","attn_mask_h","=","non_tgt_mask",",","attn_mask_g","=","attn_mask",",","mems","=","mems","[","i","]",",","target_mapping","=","target_mapping",",","d_model","=","d_model",",","n_head","=","n_head",",","d_head","=","d_head",",","dropout","=","dropout",",","dropatt","=","dropatt",",","is_training","=","is_training",",","kernel_initializer","=","initializer",")","reuse","=","True","else",":","reuse","=","False","output_h","=","rel_multihead_attn","(","h","=","output_h",",","r","=","pos_emb",",","r_w_bias","=","r_w_bias","if","not","untie_r","else","r_w_bias","[","i","]",",","r_r_bias","=","r_r_bias","if","not","untie_r","else","r_r_bias","[","i","]",",","seg_mat","=","seg_mat",",","r_s_bias","=","r_s_bias_i",",","seg_embed","=","seg_embed_i",",","attn_mask","=","non_tgt_mask",",","mems","=","mems","[","i","]",",","d_model","=","d_model",",","n_head","=","n_head",",","d_head","=","d_head",",","dropout","=","dropout",",","dropatt","=","dropatt",",","is_training","=","is_training",",","kernel_initializer","=","initializer",",","reuse","=","reuse",")","if","inp_q","is","not","None",":","output_g","=","positionwise_ffn","(","inp","=","output_g",",","d_model","=","d_model",",","d_inner","=","d_inner",",","dropout","=","dropout",",","kernel_initializer","=","initializer",",","activation_type","=","ff_activation",",","is_training","=","is_training",")","output_h","=","positionwise_ffn","(","inp","=","output_h",",","d_model","=","d_model",",","d_inner","=","d_inner",",","dropout","=","dropout",",","kernel_initializer","=","initializer",",","activation_type","=","ff_activation",",","is_training","=","is_training",",","reuse","=","reuse",")","if","inp_q","is","not","None",":","output","=","tf",".","layers",".","dropout","(","output_g",",","dropout",",","training","=","is_training",")","else",":","output","=","tf",".","layers",".","dropout","(","output_h",",","dropout",",","training","=","is_training",")","return","output",",","new_mems",",","lookup_table"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L385-L656"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"lm_loss","parameters":"(hidden, target, n_token, d_model, initializer, lookup_table=None,\n tie_weight=False, bi_data=True, use_tpu=False)","argument_list":"","return_statement":"","docstring":"doc.","docstring_summary":"doc.","docstring_tokens":["doc","."],"function":"def lm_loss(hidden, target, n_token, d_model, initializer, lookup_table=None,\n tie_weight=False, bi_data=True, use_tpu=False):\n \"\"\"doc.\"\"\"\n\n with tf.variable_scope('lm_loss'):\n if tie_weight:\n assert lookup_table is not None, \\\n 'lookup_table cannot be None for 
tie_weight'\n softmax_w = lookup_table\n else:\n softmax_w = tf.get_variable('weight', [n_token, d_model],\n dtype=hidden.dtype, initializer=initializer)\n\n softmax_b = tf.get_variable('bias', [n_token], dtype=hidden.dtype,\n initializer=tf.zeros_initializer())\n\n logits = tf.einsum('ibd,nd->ibn', hidden, softmax_w) + softmax_b\n\n if use_tpu:\n one_hot_target = tf.one_hot(target, n_token, dtype=logits.dtype)\n loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1)\n else:\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target,\n logits=logits)\n\n return loss","function_tokens":["def","lm_loss","(","hidden",",","target",",","n_token",",","d_model",",","initializer",",","lookup_table","=","None",",","tie_weight","=","False",",","bi_data","=","True",",","use_tpu","=","False",")",":","with","tf",".","variable_scope","(","'lm_loss'",")",":","if","tie_weight",":","assert","lookup_table","is","not","None",",","'lookup_table cannot be None for tie_weight'","softmax_w","=","lookup_table","else",":","softmax_w","=","tf",".","get_variable","(","'weight'",",","[","n_token",",","d_model","]",",","dtype","=","hidden",".","dtype",",","initializer","=","initializer",")","softmax_b","=","tf",".","get_variable","(","'bias'",",","[","n_token","]",",","dtype","=","hidden",".","dtype",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","einsum","(","'ibd,nd->ibn'",",","hidden",",","softmax_w",")","+","softmax_b","if","use_tpu",":","one_hot_target","=","tf",".","one_hot","(","target",",","n_token",",","dtype","=","logits",".","dtype",")","loss","=","-","tf",".","reduce_sum","(","tf",".","nn",".","log_softmax","(","logits",")","*","one_hot_target",",","-","1",")","else",":","loss","=","tf",".","nn",".","sparse_softmax_cross_entropy_with_logits","(","labels","=","target",",","logits","=","logits",")","return","loss"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L659-L684"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"summarize_sequence","parameters":"(summary_type, hidden, d_model, n_head, d_head, dropout,\n dropatt, input_mask, is_training, initializer,\n scope=None, reuse=None, use_proj=True)","argument_list":"","return_statement":"return summary","docstring":"Different classification tasks may not may not share the same parameters\n to summarize the sequence features.\n\n If shared, one can keep the `scope` to the default value `None`.\n Otherwise, one should specify a different `scope` for each task.","docstring_summary":"Different classification tasks may not may not share the same parameters\n to summarize the sequence features.","docstring_tokens":["Different","classification","tasks","may","not","may","not","share","the","same","parameters","to","summarize","the","sequence","features","."],"function":"def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout,\n dropatt, input_mask, is_training, initializer,\n scope=None, reuse=None, use_proj=True):\n\n \"\"\"\n Different classification tasks may not may not share the same parameters\n to summarize the sequence features.\n\n If shared, one can keep the `scope` to the default value `None`.\n Otherwise, one should specify a different `scope` for each task.\n \"\"\"\n\n with tf.variable_scope(scope, 'sequnece_summary', reuse=reuse):\n if summary_type == 'last':\n summary = 
hidden[-1]\n elif summary_type == 'first':\n summary = hidden[0]\n elif summary_type == 'mean':\n summary = tf.reduce_mean(hidden, axis=0)\n elif summary_type == 'attn':\n bsz = tf.shape(hidden)[1]\n\n summary_bias = tf.get_variable('summary_bias', [d_model],\n dtype=hidden.dtype,\n initializer=initializer)\n summary_bias = tf.tile(summary_bias[None, None], [1, bsz, 1])\n\n if input_mask is not None:\n input_mask = input_mask[None, :, :, None]\n\n summary = multihead_attn(summary_bias, hidden, hidden, input_mask,\n d_model, n_head, d_head, dropout, dropatt,\n is_training, initializer, residual=False)\n summary = summary[0]\n else:\n raise ValueError('Unsupported summary type {}'.format(summary_type))\n\n # use another projection as in BERT\n if use_proj:\n summary = tf.layers.dense(\n summary,\n d_model,\n activation=tf.tanh,\n kernel_initializer=initializer,\n name='summary')\n\n # dropout\n summary = tf.layers.dropout(\n summary, dropout, training=is_training,\n name='dropout')\n\n return summary","function_tokens":["def","summarize_sequence","(","summary_type",",","hidden",",","d_model",",","n_head",",","d_head",",","dropout",",","dropatt",",","input_mask",",","is_training",",","initializer",",","scope","=","None",",","reuse","=","None",",","use_proj","=","True",")",":","with","tf",".","variable_scope","(","scope",",","'sequnece_summary'",",","reuse","=","reuse",")",":","if","summary_type","==","'last'",":","summary","=","hidden","[","-","1","]","elif","summary_type","==","'first'",":","summary","=","hidden","[","0","]","elif","summary_type","==","'mean'",":","summary","=","tf",".","reduce_mean","(","hidden",",","axis","=","0",")","elif","summary_type","==","'attn'",":","bsz","=","tf",".","shape","(","hidden",")","[","1","]","summary_bias","=","tf",".","get_variable","(","'summary_bias'",",","[","d_model","]",",","dtype","=","hidden",".","dtype",",","initializer","=","initializer",")","summary_bias","=","tf",".","tile","(","summary_bias","[","None",",","None","]",",","[","1",",","bsz",",","1","]",")","if","input_mask","is","not","None",":","input_mask","=","input_mask","[","None",",",":",",",":",",","None","]","summary","=","multihead_attn","(","summary_bias",",","hidden",",","hidden",",","input_mask",",","d_model",",","n_head",",","d_head",",","dropout",",","dropatt",",","is_training",",","initializer",",","residual","=","False",")","summary","=","summary","[","0","]","else",":","raise","ValueError","(","'Unsupported summary type {}'",".","format","(","summary_type",")",")","# use another projection as in BERT","if","use_proj",":","summary","=","tf",".","layers",".","dense","(","summary",",","d_model",",","activation","=","tf",".","tanh",",","kernel_initializer","=","initializer",",","name","=","'summary'",")","# dropout","summary","=","tf",".","layers",".","dropout","(","summary",",","dropout",",","training","=","is_training",",","name","=","'dropout'",")","return","summary"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L687-L738"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/modeling.py","language":"python","identifier":"classification_loss","parameters":"(hidden, labels, n_class, initializer, scope, reuse=None,\n return_logits=False)","argument_list":"","return_statement":"","docstring":"Different classification tasks should use different scope names to ensure\n different dense layers (parameters) are used to produce the 
logits.\n\n An exception will be in transfer learning, where one hopes to transfer\n the classification weights.","docstring_summary":"Different classification tasks should use different scope names to ensure\n different dense layers (parameters) are used to produce the logits.","docstring_tokens":["Different","classification","tasks","should","use","different","scope","names","to","ensure","different","dense","layers","(","parameters",")","are","used","to","produce","the","logits","."],"function":"def classification_loss(hidden, labels, n_class, initializer, scope, reuse=None,\n return_logits=False):\n \"\"\"\n Different classification tasks should use different scope names to ensure\n different dense layers (parameters) are used to produce the logits.\n\n An exception will be in transfer learning, where one hopes to transfer\n the classification weights.\n \"\"\"\n\n with tf.variable_scope(scope, reuse=reuse):\n logits = tf.layers.dense(\n hidden,\n n_class,\n kernel_initializer=initializer,\n name='logit')\n\n one_hot_target = tf.one_hot(labels, n_class, dtype=hidden.dtype)\n loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1)\n\n if return_logits:\n return loss, logits\n\n return loss","function_tokens":["def","classification_loss","(","hidden",",","labels",",","n_class",",","initializer",",","scope",",","reuse","=","None",",","return_logits","=","False",")",":","with","tf",".","variable_scope","(","scope",",","reuse","=","reuse",")",":","logits","=","tf",".","layers",".","dense","(","hidden",",","n_class",",","kernel_initializer","=","initializer",",","name","=","'logit'",")","one_hot_target","=","tf",".","one_hot","(","labels",",","n_class",",","dtype","=","hidden",".","dtype",")","loss","=","-","tf",".","reduce_sum","(","tf",".","nn",".","log_softmax","(","logits",")","*","one_hot_target",",","-","1",")","if","return_logits",":","return","loss",",","logits","return","loss"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/modeling.py#L741-L764"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"_get_initializer","parameters":"(FLAGS)","argument_list":"","return_statement":"return initializer","docstring":"Get variable intializer.","docstring_summary":"Get variable intializer.","docstring_tokens":["Get","variable","intializer","."],"function":"def _get_initializer(FLAGS):\n \"\"\"Get variable intializer.\"\"\"\n if FLAGS.init == \"uniform\":\n initializer = tf.initializers.random_uniform(\n minval=-FLAGS.init_range,\n maxval=FLAGS.init_range,\n seed=None)\n elif FLAGS.init == \"normal\":\n initializer = tf.initializers.random_normal(\n stddev=FLAGS.init_std,\n seed=None)\n else:\n raise ValueError(\"Initializer {} not supported\".format(FLAGS.init))\n return initializer","function_tokens":["def","_get_initializer","(","FLAGS",")",":","if","FLAGS",".","init","==","\"uniform\"",":","initializer","=","tf",".","initializers",".","random_uniform","(","minval","=","-","FLAGS",".","init_range",",","maxval","=","FLAGS",".","init_range",",","seed","=","None",")","elif","FLAGS",".","init","==","\"normal\"",":","initializer","=","tf",".","initializers",".","random_normal","(","stddev","=","FLAGS",".","init_std",",","seed","=","None",")","else",":","raise","ValueError","(","\"Initializer {} not 
supported\"",".","format","(","FLAGS",".","init",")",")","return","initializer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L11-L24"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"XLNetConfig.__init__","parameters":"(self, FLAGS=None, json_path=None)","argument_list":"","return_statement":"","docstring":"Constructing an XLNetConfig.\n One of FLAGS or json_path should be provided.","docstring_summary":"Constructing an XLNetConfig.\n One of FLAGS or json_path should be provided.","docstring_tokens":["Constructing","an","XLNetConfig",".","One","of","FLAGS","or","json_path","should","be","provided","."],"function":"def __init__(self, FLAGS=None, json_path=None):\n \"\"\"Constructing an XLNetConfig.\n One of FLAGS or json_path should be provided.\"\"\"\n\n assert FLAGS is not None or json_path is not None\n\n self.keys = [\"n_layer\", \"d_model\", \"n_head\", \"d_head\", \"d_inner\",\n \"ff_activation\", \"untie_r\", \"n_token\"]\n\n if FLAGS is not None:\n self.init_from_flags(FLAGS)\n\n if json_path is not None:\n self.init_from_json(json_path)","function_tokens":["def","__init__","(","self",",","FLAGS","=","None",",","json_path","=","None",")",":","assert","FLAGS","is","not","None","or","json_path","is","not","None","self",".","keys","=","[","\"n_layer\"",",","\"d_model\"",",","\"n_head\"",",","\"d_head\"",",","\"d_inner\"",",","\"ff_activation\"",",","\"untie_r\"",",","\"n_token\"","]","if","FLAGS","is","not","None",":","self",".","init_from_flags","(","FLAGS",")","if","json_path","is","not","None",":","self",".","init_from_json","(","json_path",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L43-L56"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"XLNetConfig.to_json","parameters":"(self, json_path)","argument_list":"","return_statement":"","docstring":"Save XLNetConfig to a json file.","docstring_summary":"Save XLNetConfig to a json file.","docstring_tokens":["Save","XLNetConfig","to","a","json","file","."],"function":"def to_json(self, json_path):\n \"\"\"Save XLNetConfig to a json file.\"\"\"\n json_data = {}\n for key in self.keys:\n json_data[key] = getattr(self, key)\n\n json_dir = os.path.dirname(json_path)\n if not tf.gfile.Exists(json_dir):\n tf.gfile.MakeDirs(json_dir)\n with tf.gfile.Open(json_path, \"w\") as f:\n json.dump(json_data, f, indent=4, sort_keys=True)","function_tokens":["def","to_json","(","self",",","json_path",")",":","json_data","=","{","}","for","key","in","self",".","keys",":","json_data","[","key","]","=","getattr","(","self",",","key",")","json_dir","=","os",".","path",".","dirname","(","json_path",")","if","not","tf",".","gfile",".","Exists","(","json_dir",")",":","tf",".","gfile",".","MakeDirs","(","json_dir",")","with","tf",".","gfile",".","Open","(","json_path",",","\"w\"",")","as","f",":","json",".","dump","(","json_data",",","f",",","indent","=","4",",","sort_keys","=","True",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L68-L78"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"RunConfig.__init__","parameters":"(self, is_training, use_tpu, use_bfloat16, dropout, dropatt,\n init=\"normal\", init_range=0.1, init_std=0.02, mem_len=None,\n reuse_len=None, bi_data=False, clamp_len=-1, same_length=False)","argument_list":"","return_statement":"","docstring":"Args:\n is_training: bool, whether in training mode.\n use_tpu: bool, whether TPUs are used.\n use_bfloat16: bool, use bfloat16 instead of float32.\n dropout: float, dropout rate.\n dropatt: float, dropout rate on attention probabilities.\n init: str, the initialization scheme, either \"normal\" or \"uniform\".\n init_range: float, initialize the parameters with a uniform distribution\n in [-init_range, init_range]. Only effective when init=\"uniform\".\n init_std: float, initialize the parameters with a normal distribution\n with mean 0 and stddev init_std. Only effective when init=\"normal\".\n mem_len: int, the number of tokens to cache.\n reuse_len: int, the number of tokens in the currect batch to be cached\n and reused in the future.\n bi_data: bool, whether to use bidirectional input pipeline.\n Usually set to True during pretraining and False during finetuning.\n clamp_len: int, clamp all relative distances larger than clamp_len.\n -1 means no clamping.\n same_length: bool, whether to use the same attention length for each token.","docstring_summary":"Args:\n is_training: bool, whether in training mode.\n use_tpu: bool, whether TPUs are used.\n use_bfloat16: bool, use bfloat16 instead of float32.\n dropout: float, dropout rate.\n dropatt: float, dropout rate on attention probabilities.\n init: str, the initialization scheme, either \"normal\" or \"uniform\".\n init_range: float, initialize the parameters with a uniform distribution\n in [-init_range, init_range]. Only effective when init=\"uniform\".\n init_std: float, initialize the parameters with a normal distribution\n with mean 0 and stddev init_std. 
Only effective when init=\"normal\".\n mem_len: int, the number of tokens to cache.\n reuse_len: int, the number of tokens in the currect batch to be cached\n and reused in the future.\n bi_data: bool, whether to use bidirectional input pipeline.\n Usually set to True during pretraining and False during finetuning.\n clamp_len: int, clamp all relative distances larger than clamp_len.\n -1 means no clamping.\n same_length: bool, whether to use the same attention length for each token.","docstring_tokens":["Args",":","is_training",":","bool","whether","in","training","mode",".","use_tpu",":","bool","whether","TPUs","are","used",".","use_bfloat16",":","bool","use","bfloat16","instead","of","float32",".","dropout",":","float","dropout","rate",".","dropatt",":","float","dropout","rate","on","attention","probabilities",".","init",":","str","the","initialization","scheme","either","normal","or","uniform",".","init_range",":","float","initialize","the","parameters","with","a","uniform","distribution","in","[","-","init_range","init_range","]",".","Only","effective","when","init","=","uniform",".","init_std",":","float","initialize","the","parameters","with","a","normal","distribution","with","mean","0","and","stddev","init_std",".","Only","effective","when","init","=","normal",".","mem_len",":","int","the","number","of","tokens","to","cache",".","reuse_len",":","int","the","number","of","tokens","in","the","currect","batch","to","be","cached","and","reused","in","the","future",".","bi_data",":","bool","whether","to","use","bidirectional","input","pipeline",".","Usually","set","to","True","during","pretraining","and","False","during","finetuning",".","clamp_len",":","int","clamp","all","relative","distances","larger","than","clamp_len",".","-","1","means","no","clamping",".","same_length",":","bool","whether","to","use","the","same","attention","length","for","each","token","."],"function":"def __init__(self, is_training, use_tpu, use_bfloat16, dropout, dropatt,\n init=\"normal\", init_range=0.1, init_std=0.02, mem_len=None,\n reuse_len=None, bi_data=False, clamp_len=-1, same_length=False):\n \"\"\"\n Args:\n is_training: bool, whether in training mode.\n use_tpu: bool, whether TPUs are used.\n use_bfloat16: bool, use bfloat16 instead of float32.\n dropout: float, dropout rate.\n dropatt: float, dropout rate on attention probabilities.\n init: str, the initialization scheme, either \"normal\" or \"uniform\".\n init_range: float, initialize the parameters with a uniform distribution\n in [-init_range, init_range]. Only effective when init=\"uniform\".\n init_std: float, initialize the parameters with a normal distribution\n with mean 0 and stddev init_std. 
Only effective when init=\"normal\".\n mem_len: int, the number of tokens to cache.\n reuse_len: int, the number of tokens in the currect batch to be cached\n and reused in the future.\n bi_data: bool, whether to use bidirectional input pipeline.\n Usually set to True during pretraining and False during finetuning.\n clamp_len: int, clamp all relative distances larger than clamp_len.\n -1 means no clamping.\n same_length: bool, whether to use the same attention length for each token.\n \"\"\"\n\n self.init = init\n self.init_range = init_range\n self.init_std = init_std\n self.is_training = is_training\n self.dropout = dropout\n self.dropatt = dropatt\n self.use_tpu = use_tpu\n self.use_bfloat16 = use_bfloat16\n self.mem_len = mem_len\n self.reuse_len = reuse_len\n self.bi_data = bi_data\n self.clamp_len = clamp_len\n self.same_length = same_length","function_tokens":["def","__init__","(","self",",","is_training",",","use_tpu",",","use_bfloat16",",","dropout",",","dropatt",",","init","=","\"normal\"",",","init_range","=","0.1",",","init_std","=","0.02",",","mem_len","=","None",",","reuse_len","=","None",",","bi_data","=","False",",","clamp_len","=","-","1",",","same_length","=","False",")",":","self",".","init","=","init","self",".","init_range","=","init_range","self",".","init_std","=","init_std","self",".","is_training","=","is_training","self",".","dropout","=","dropout","self",".","dropatt","=","dropatt","self",".","use_tpu","=","use_tpu","self",".","use_bfloat16","=","use_bfloat16","self",".","mem_len","=","mem_len","self",".","reuse_len","=","reuse_len","self",".","bi_data","=","bi_data","self",".","clamp_len","=","clamp_len","self",".","same_length","=","same_length"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L111-L148"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"XLNetModel.__init__","parameters":"(self, xlnet_config, run_config, input_ids, seg_ids, input_mask,\n mems=None, perm_mask=None, target_mapping=None, inp_q=None,\n **kwargs)","argument_list":"","return_statement":"","docstring":"Args:\n xlnet_config: XLNetConfig,\n run_config: RunConfig,\n input_ids: int32 Tensor in shape [len, bsz], the input token IDs.\n seg_ids: int32 Tensor in shape [len, bsz], the input segment IDs.\n input_mask: float32 Tensor in shape [len, bsz], the input mask.\n 0 for real tokens and 1 for padding.\n mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory\n from previous batches. 
The length of the list equals n_layer.\n If None, no memory is used.\n perm_mask: float32 Tensor in shape [len, len, bsz].\n If perm_mask[i, j, k] = 0, i attend to j in batch k;\n if perm_mask[i, j, k] = 1, i does not attend to j in batch k.\n If None, each position attends to all the others.\n target_mapping: float32 Tensor in shape [num_predict, len, bsz].\n If target_mapping[i, j, k] = 1, the i-th predict in batch k is\n on the j-th token.\n Only used during pretraining for partial prediction.\n Set to None during finetuning.\n inp_q: float32 Tensor in shape [len, bsz].\n 1 for tokens with losses and 0 for tokens without losses.\n Only used during pretraining for two-stream attention.\n Set to None during finetuning.","docstring_summary":"Args:\n xlnet_config: XLNetConfig,\n run_config: RunConfig,\n input_ids: int32 Tensor in shape [len, bsz], the input token IDs.\n seg_ids: int32 Tensor in shape [len, bsz], the input segment IDs.\n input_mask: float32 Tensor in shape [len, bsz], the input mask.\n 0 for real tokens and 1 for padding.\n mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory\n from previous batches. The length of the list equals n_layer.\n If None, no memory is used.\n perm_mask: float32 Tensor in shape [len, len, bsz].\n If perm_mask[i, j, k] = 0, i attend to j in batch k;\n if perm_mask[i, j, k] = 1, i does not attend to j in batch k.\n If None, each position attends to all the others.\n target_mapping: float32 Tensor in shape [num_predict, len, bsz].\n If target_mapping[i, j, k] = 1, the i-th predict in batch k is\n on the j-th token.\n Only used during pretraining for partial prediction.\n Set to None during finetuning.\n inp_q: float32 Tensor in shape [len, bsz].\n 1 for tokens with losses and 0 for tokens without losses.\n Only used during pretraining for two-stream attention.\n Set to None during finetuning.","docstring_tokens":["Args",":","xlnet_config",":","XLNetConfig","run_config",":","RunConfig","input_ids",":","int32","Tensor","in","shape","[","len","bsz","]","the","input","token","IDs",".","seg_ids",":","int32","Tensor","in","shape","[","len","bsz","]","the","input","segment","IDs",".","input_mask",":","float32","Tensor","in","shape","[","len","bsz","]","the","input","mask",".","0","for","real","tokens","and","1","for","padding",".","mems",":","a","list","of","float32","Tensors","in","shape","[","mem_len","bsz","d_model","]","memory","from","previous","batches",".","The","length","of","the","list","equals","n_layer",".","If","None","no","memory","is","used",".","perm_mask",":","float32","Tensor","in","shape","[","len","len","bsz","]",".","If","perm_mask","[","i","j","k","]","=","0","i","attend","to","j","in","batch","k",";","if","perm_mask","[","i","j","k","]","=","1","i","does","not","attend","to","j","in","batch","k",".","If","None","each","position","attends","to","all","the","others",".","target_mapping",":","float32","Tensor","in","shape","[","num_predict","len","bsz","]",".","If","target_mapping","[","i","j","k","]","=","1","the","i","-","th","predict","in","batch","k","is","on","the","j","-","th","token",".","Only","used","during","pretraining","for","partial","prediction",".","Set","to","None","during","finetuning",".","inp_q",":","float32","Tensor","in","shape","[","len","bsz","]",".","1","for","tokens","with","losses","and","0","for","tokens","without","losses",".","Only","used","during","pretraining","for","two","-","stream","attention",".","Set","to","None","during","finetuning","."],"function":"def __init__(self, xlnet_config, 
run_config, input_ids, seg_ids, input_mask,\n mems=None, perm_mask=None, target_mapping=None, inp_q=None,\n **kwargs):\n \"\"\"\n Args:\n xlnet_config: XLNetConfig,\n run_config: RunConfig,\n input_ids: int32 Tensor in shape [len, bsz], the input token IDs.\n seg_ids: int32 Tensor in shape [len, bsz], the input segment IDs.\n input_mask: float32 Tensor in shape [len, bsz], the input mask.\n 0 for real tokens and 1 for padding.\n mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory\n from previous batches. The length of the list equals n_layer.\n If None, no memory is used.\n perm_mask: float32 Tensor in shape [len, len, bsz].\n If perm_mask[i, j, k] = 0, i attend to j in batch k;\n if perm_mask[i, j, k] = 1, i does not attend to j in batch k.\n If None, each position attends to all the others.\n target_mapping: float32 Tensor in shape [num_predict, len, bsz].\n If target_mapping[i, j, k] = 1, the i-th predict in batch k is\n on the j-th token.\n Only used during pretraining for partial prediction.\n Set to None during finetuning.\n inp_q: float32 Tensor in shape [len, bsz].\n 1 for tokens with losses and 0 for tokens without losses.\n Only used during pretraining for two-stream attention.\n Set to None during finetuning.\n \"\"\"\n\n initializer = _get_initializer(run_config)\n\n tfm_args = dict(\n n_token=xlnet_config.n_token,\n initializer=initializer,\n attn_type=\"bi\",\n n_layer=xlnet_config.n_layer,\n d_model=xlnet_config.d_model,\n n_head=xlnet_config.n_head,\n d_head=xlnet_config.d_head,\n d_inner=xlnet_config.d_inner,\n ff_activation=xlnet_config.ff_activation,\n untie_r=xlnet_config.untie_r,\n\n is_training=run_config.is_training,\n use_bfloat16=run_config.use_bfloat16,\n use_tpu=run_config.use_tpu,\n dropout=run_config.dropout,\n dropatt=run_config.dropatt,\n\n mem_len=run_config.mem_len,\n reuse_len=run_config.reuse_len,\n bi_data=run_config.bi_data,\n clamp_len=run_config.clamp_len,\n same_length=run_config.same_length\n )\n\n input_args = dict(\n inp_k=input_ids,\n seg_id=seg_ids,\n input_mask=input_mask,\n mems=mems,\n perm_mask=perm_mask,\n target_mapping=target_mapping,\n inp_q=inp_q)\n tfm_args.update(input_args)\n\n with tf.variable_scope(\"model\", reuse=tf.AUTO_REUSE):\n (self.output, self.new_mems, self.lookup_table\n ) = modeling.transformer_xl(**tfm_args)\n\n self.input_mask = input_mask\n self.initializer = initializer\n self.xlnet_config = xlnet_config\n self.run_config = 
run_config","function_tokens":["def","__init__","(","self",",","xlnet_config",",","run_config",",","input_ids",",","seg_ids",",","input_mask",",","mems","=","None",",","perm_mask","=","None",",","target_mapping","=","None",",","inp_q","=","None",",","*","*","kwargs",")",":","initializer","=","_get_initializer","(","run_config",")","tfm_args","=","dict","(","n_token","=","xlnet_config",".","n_token",",","initializer","=","initializer",",","attn_type","=","\"bi\"",",","n_layer","=","xlnet_config",".","n_layer",",","d_model","=","xlnet_config",".","d_model",",","n_head","=","xlnet_config",".","n_head",",","d_head","=","xlnet_config",".","d_head",",","d_inner","=","xlnet_config",".","d_inner",",","ff_activation","=","xlnet_config",".","ff_activation",",","untie_r","=","xlnet_config",".","untie_r",",","is_training","=","run_config",".","is_training",",","use_bfloat16","=","run_config",".","use_bfloat16",",","use_tpu","=","run_config",".","use_tpu",",","dropout","=","run_config",".","dropout",",","dropatt","=","run_config",".","dropatt",",","mem_len","=","run_config",".","mem_len",",","reuse_len","=","run_config",".","reuse_len",",","bi_data","=","run_config",".","bi_data",",","clamp_len","=","run_config",".","clamp_len",",","same_length","=","run_config",".","same_length",")","input_args","=","dict","(","inp_k","=","input_ids",",","seg_id","=","seg_ids",",","input_mask","=","input_mask",",","mems","=","mems",",","perm_mask","=","perm_mask",",","target_mapping","=","target_mapping",",","inp_q","=","inp_q",")","tfm_args",".","update","(","input_args",")","with","tf",".","variable_scope","(","\"model\"",",","reuse","=","tf",".","AUTO_REUSE",")",":","(","self",".","output",",","self",".","new_mems",",","self",".","lookup_table",")","=","modeling",".","transformer_xl","(","*","*","tfm_args",")","self",".","input_mask","=","input_mask","self",".","initializer","=","initializer","self",".","xlnet_config","=","xlnet_config","self",".","run_config","=","run_config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L154-L227"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"XLNetModel.get_pooled_out","parameters":"(self, summary_type, use_summ_proj=True)","argument_list":"","return_statement":"return summary","docstring":"Args:\n summary_type: str, \"last\", \"first\", \"mean\", or \"attn\". The method\n to pool the input to get a vector representation.\n use_summ_proj: bool, whether to use a linear projection during pooling.\n\n Returns:\n float32 Tensor in shape [bsz, d_model], the pooled representation.","docstring_summary":"Args:\n summary_type: str, \"last\", \"first\", \"mean\", or \"attn\". The method\n to pool the input to get a vector representation.\n use_summ_proj: bool, whether to use a linear projection during pooling.","docstring_tokens":["Args",":","summary_type",":","str","last","first","mean","or","attn",".","The","method","to","pool","the","input","to","get","a","vector","representation",".","use_summ_proj",":","bool","whether","to","use","a","linear","projection","during","pooling","."],"function":"def get_pooled_out(self, summary_type, use_summ_proj=True):\n \"\"\"\n Args:\n summary_type: str, \"last\", \"first\", \"mean\", or \"attn\". 
The method\n to pool the input to get a vector representation.\n use_summ_proj: bool, whether to use a linear projection during pooling.\n\n Returns:\n float32 Tensor in shape [bsz, d_model], the pooled representation.\n \"\"\"\n\n xlnet_config = self.xlnet_config\n run_config = self.run_config\n\n with tf.variable_scope(\"model\", reuse=tf.AUTO_REUSE):\n summary = modeling.summarize_sequence(\n summary_type=summary_type,\n hidden=self.output,\n d_model=xlnet_config.d_model,\n n_head=xlnet_config.n_head,\n d_head=xlnet_config.d_head,\n dropout=run_config.dropout,\n dropatt=run_config.dropatt,\n is_training=run_config.is_training,\n input_mask=self.input_mask,\n initializer=self.initializer,\n use_proj=use_summ_proj)\n\n return summary","function_tokens":["def","get_pooled_out","(","self",",","summary_type",",","use_summ_proj","=","True",")",":","xlnet_config","=","self",".","xlnet_config","run_config","=","self",".","run_config","with","tf",".","variable_scope","(","\"model\"",",","reuse","=","tf",".","AUTO_REUSE",")",":","summary","=","modeling",".","summarize_sequence","(","summary_type","=","summary_type",",","hidden","=","self",".","output",",","d_model","=","xlnet_config",".","d_model",",","n_head","=","xlnet_config",".","n_head",",","d_head","=","xlnet_config",".","d_head",",","dropout","=","run_config",".","dropout",",","dropatt","=","run_config",".","dropatt",",","is_training","=","run_config",".","is_training",",","input_mask","=","self",".","input_mask",",","initializer","=","self",".","initializer",",","use_proj","=","use_summ_proj",")","return","summary"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L229-L257"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"XLNetModel.get_sequence_output","parameters":"(self)","argument_list":"","return_statement":"return self.output","docstring":"Returns:\n float32 Tensor in shape [len, bsz, d_model]. The last layer hidden\n representation of XLNet.","docstring_summary":"Returns:\n float32 Tensor in shape [len, bsz, d_model]. The last layer hidden\n representation of XLNet.","docstring_tokens":["Returns",":","float32","Tensor","in","shape","[","len","bsz","d_model","]",".","The","last","layer","hidden","representation","of","XLNet","."],"function":"def get_sequence_output(self):\n \"\"\"\n Returns:\n float32 Tensor in shape [len, bsz, d_model]. 
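`get_pooled_out` pairs naturally with `classification_loss` from the earlier record; `summary_type="last"` matches this codebase's convention of appending `CLS_ID` at the end of the sequence (see `convert_single_example` further down). A hedged sketch continuing the names from the previous snippet, with a hypothetical two-class task:

```python
summary = model.get_pooled_out("last", use_summ_proj=True)  # [bsz, d_model]

labels = tf.placeholder(tf.int32, [bsz])
per_example_loss, logits = modeling.classification_loss(
    hidden=summary,
    labels=labels,
    n_class=2,                            # hypothetical binary task
    initializer=model.get_initializer(),  # same init scheme as the backbone
    scope="classification_task",          # task-specific scope, per the docstring
    return_logits=True)
loss = tf.reduce_mean(per_example_loss)
```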
The last layer hidden\n representation of XLNet.\n \"\"\"\n\n return self.output","function_tokens":["def","get_sequence_output","(","self",")",":","return","self",".","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L259-L266"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"XLNetModel.get_new_memory","parameters":"(self)","argument_list":"","return_statement":"return self.new_mems","docstring":"Returns:\n list of float32 Tensors in shape [mem_len, bsz, d_model], the new\n memory that concatenates the previous memory with the current input\n representations.\n The length of the list equals n_layer.","docstring_summary":"Returns:\n list of float32 Tensors in shape [mem_len, bsz, d_model], the new\n memory that concatenates the previous memory with the current input\n representations.\n The length of the list equals n_layer.","docstring_tokens":["Returns",":","list","of","float32","Tensors","in","shape","[","mem_len","bsz","d_model","]","the","new","memory","that","concatenates","the","previous","memory","with","the","current","input","representations",".","The","length","of","the","list","equals","n_layer","."],"function":"def get_new_memory(self):\n \"\"\"\n Returns:\n list of float32 Tensors in shape [mem_len, bsz, d_model], the new\n memory that concatenates the previous memory with the current input\n representations.\n The length of the list equals n_layer.\n \"\"\"\n return self.new_mems","function_tokens":["def","get_new_memory","(","self",")",":","return","self",".","new_mems"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L268-L276"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"XLNetModel.get_embedding_table","parameters":"(self)","argument_list":"","return_statement":"return self.lookup_table","docstring":"Returns:\n float32 Tensor in shape [n_token, d_model]. The embedding lookup table.\n Used for tying embeddings between input and output layers.","docstring_summary":"Returns:\n float32 Tensor in shape [n_token, d_model]. The embedding lookup table.\n Used for tying embeddings between input and output layers.","docstring_tokens":["Returns",":","float32","Tensor","in","shape","[","n_token","d_model","]",".","The","embedding","lookup","table",".","Used","for","tying","embeddings","between","input","and","output","layers","."],"function":"def get_embedding_table(self):\n \"\"\"\n Returns:\n float32 Tensor in shape [n_token, d_model]. The embedding lookup table.\n Used for tying embeddings between input and output layers.\n \"\"\"\n return self.lookup_table","function_tokens":["def","get_embedding_table","(","self",")",":","return","self",".","lookup_table"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L278-L284"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/xlnet.py","language":"python","identifier":"XLNetModel.get_initializer","parameters":"(self)","argument_list":"","return_statement":"return self.initializer","docstring":"Returns:\n A tf initializer. 
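`get_new_memory` is what makes segment-level recurrence work: the returned list plugs into the `mems` argument of the next segment's `XLNetModel`. A sketch under the assumption that `run_config.mem_len` was set and that the `next_*` placeholders are built like the earlier ones:

```python
new_mems = model.get_new_memory()  # n_layer tensors, each [mem_len, bsz, d_model]

# Hypothetical inputs for the following segment, same [len, bsz] layout.
next_input_ids = tf.placeholder(tf.int32, [seq_len, bsz])
next_seg_ids = tf.placeholder(tf.int32, [seq_len, bsz])
next_input_mask = tf.placeholder(tf.float32, [seq_len, bsz])

next_model = xlnet.XLNetModel(
    xlnet_config=xlnet_config,
    run_config=run_config,
    input_ids=next_input_ids,
    seg_ids=next_seg_ids,
    input_mask=next_input_mask,
    mems=new_mems)  # positions may now attend to the cached previous segment
```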
Used to initialize variables in layers on top of XLNet.","docstring_summary":"Returns:\n A tf initializer. Used to initialize variables in layers on top of XLNet.","docstring_tokens":["Returns",":","A","tf","initializer",".","Used","to","initialize","variables","in","layers","on","top","of","XLNet","."],"function":"def get_initializer(self):\n \"\"\"\n Returns:\n A tf initializer. Used to initialize variables in layers on top of XLNet.\n \"\"\"\n return self.initializer","function_tokens":["def","get_initializer","(","self",")",":","return","self",".","initializer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/xlnet.py#L286-L291"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_classifier.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. 
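`get_embedding_table` and `get_initializer` together cover the weight-tying path of `lm_loss` shown earlier: with `tie_weight=True` the softmax reuses the `[n_token, d_model]` lookup table. A hedged sketch; the `target` placeholder is hypothetical, and this path is normally exercised during pretraining rather than finetuning:

```python
target = tf.placeholder(tf.int32, [seq_len, bsz])  # hypothetical LM targets

per_token_loss = modeling.lm_loss(
    hidden=model.get_sequence_output(),        # [len, bsz, d_model]
    target=target,
    n_token=xlnet_config.n_token,
    d_model=xlnet_config.d_model,
    initializer=model.get_initializer(),
    lookup_table=model.get_embedding_table(),  # required when tie_weight=True
    tie_weight=True,
    use_tpu=False)
```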
This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_classifier.py#L185-L199"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_classifier.py","language":"python","identifier":"convert_single_example","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenize_fn)","argument_list":"","return_statement":"return feature","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenize_fn):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[1] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n if label_list is not None:\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenize_fn(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenize_fn(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for two [SEP] & one [CLS] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for one [SEP] & one [CLS] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:max_seq_length - 2]\n\n tokens = []\n segment_ids = []\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(SEG_ID_A)\n tokens.append(SEP_ID)\n segment_ids.append(SEG_ID_A)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(SEG_ID_B)\n tokens.append(SEP_ID)\n segment_ids.append(SEG_ID_B)\n\n tokens.append(CLS_ID)\n segment_ids.append(SEG_ID_CLS)\n\n input_ids = tokens\n\n # The mask has 0 for real tokens and 1 for padding tokens. 
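Two conventions worth demonstrating from the records around this point: `_truncate_seq_pair` always pops from the longer list, and XLNet pads on the left, with `input_mask` using 1 for padding (the opposite of BERT's mask). A pure-Python illustration with toy token ids; the layout line is a simplification of the real `A [SEP] B [SEP] [CLS]` ordering:

```python
tokens_a = list(range(10))
tokens_b = list(range(7))
_truncate_seq_pair(tokens_a, tokens_b, max_length=12)
assert (len(tokens_a), len(tokens_b)) == (6, 6)  # longer side shrinks first

max_seq_length = 16
input_ids = tokens_a + tokens_b         # simplified stand-in for the real layout
input_mask = [0] * len(input_ids)       # 0 marks real tokens
delta = max_seq_length - len(input_ids)
input_ids = [0] * delta + input_ids     # pad ids on the *left*
input_mask = [1] * delta + input_mask   # 1 marks padding positions
```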
Only real\n # tokens are attended to.\n input_mask = [0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n if len(input_ids) < max_seq_length:\n delta_len = max_seq_length - len(input_ids)\n input_ids = [0] * delta_len + input_ids\n input_mask = [1] * delta_len + input_mask\n segment_ids = [SEG_ID_PAD] * delta_len + segment_ids\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if label_list is not None:\n label_id = label_map[example.label]\n else:\n label_id = example.label\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: {} (id = {})\".format(example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature","function_tokens":["def","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenize_fn",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","1","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","if","label_list","is","not","None",":","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenize_fn","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenize_fn","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for two [SEP] & one [CLS] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_seq_length","-","3",")","else",":","# Account for one [SEP] & one [CLS] with \"- 2\"","if","len","(","tokens_a",")",">","max_seq_length","-","2",":","tokens_a","=","tokens_a","[",":","max_seq_length","-","2","]","tokens","=","[","]","segment_ids","=","[","]","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","SEG_ID_A",")","tokens",".","append","(","SEP_ID",")","segment_ids",".","append","(","SEG_ID_A",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","SEG_ID_B",")","tokens",".","append","(","SEP_ID",")","segment_ids",".","append","(","SEG_ID_B",")","tokens",".","append","(","CLS_ID",")","segment_ids",".","append","(","SEG_ID_CLS",")","input_ids","=","tokens","# The mask has 0 for real tokens and 1 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","0","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","if","len","(","input_ids",")","<","max_seq_length",":","delta_len","=","max_seq_length","-","len","(","input_ids",")","input_ids","=","[","0","]","*","delta_len","+","input_ids","input_mask","=","[","1","]","*","delta_len","+","input_mask","segment_ids","=","[","SEG_ID_PAD","]","*","delta_len","+","segment_ids","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","if","label_list","is","not","None",":","label_id","=","label_map","[","example",".","label","]","else",":","label_id","=","example",".","label","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","tf",".","logging",".","info","(","\"label: {} (id = {})\"",".","format","(","example",".","label",",","label_id",")",")","feature","=","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",")","return","feature"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_classifier.py#L202-L286"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_classifier.py","language":"python","identifier":"convert_example_list_for_inews","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature_list","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_example_list_for_inews(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return [InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)]\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer(example.text_b)\n must_len = len(tokens_a) + 3\n extra_len = max_seq_length - must_len\n feature_list = []\n if example.text_b and extra_len > 0:\n extra_num = int((len(tokens_b) - 1) \/ extra_len) + 1\n for num in range(extra_num):\n max_len = min((num + 1) * extra_len, len(tokens_b))\n tokens_b_sub = tokens_b[num * extra_len: max_len]\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b_sub, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n else:\n feature = 
convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n return feature_list","function_tokens":["def","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","[","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","]","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer","(","example",".","text_b",")","must_len","=","len","(","tokens_a",")","+","3","extra_len","=","max_seq_length","-","must_len","feature_list","=","[","]","if","example",".","text_b","and","extra_len",">","0",":","extra_num","=","int","(","(","len","(","tokens_b",")","-","1",")","\/","extra_len",")","+","1","for","num","in","range","(","extra_num",")",":","max_len","=","min","(","(","num","+","1",")","*","extra_len",",","len","(","tokens_b",")",")","tokens_b_sub","=","tokens_b","[","num","*","extra_len",":","max_len","]","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b_sub",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","else",":","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","return","feature_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_classifier.py#L356-L391"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features_for_inews","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file, num_passes=1)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features_for_inews(\n examples, label_list, max_seq_length, tokenizer, output_file, num_passes=1):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n num_example = 0\n if num_passes > 1:\n examples *= num_passes\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature_list = convert_example_list_for_inews(ex_index, example, label_list,\n max_seq_length, tokenizer)\n num_example += len(feature_list)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n for feature in feature_list:\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n 
features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"feature num: %s\", num_example)\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features_for_inews","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",",","num_passes","=","1",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","num_example","=","0","if","num_passes",">","1",":","examples","*=","num_passes","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","1000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature_list","=","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","num_example","+=","len","(","feature_list",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","for","feature","in","feature_list",":","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","tf",".","logging",".","info","(","\"feature num: %s\"",",","num_example",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_classifier.py#L394-L427"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features","parameters":"(\n examples, label_list, max_seq_length, tokenize_fn, output_file,\n num_passes=1)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenize_fn, output_file,\n num_passes=1):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n print(len(examples))\n sys.stdout.flush()\n # do not create duplicated records\n if tf.gfile.Exists(output_file) and not FLAGS.overwrite_data:\n tf.logging.info(\"Do not overwrite tfrecord {} 
exists.\".format(output_file))\n return\n\n tf.logging.info(\"Create new tfrecord {}.\".format(output_file))\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n if num_passes > 1:\n examples *= num_passes\n\n print(len(examples))\n sys.stdout.flush()\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example {} of {}\".format(ex_index,\n len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenize_fn)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n def create_float_feature(values):\n f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_float_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n if label_list is not None:\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n else:\n features[\"label_ids\"] = create_float_feature([float(feature.label_id)])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenize_fn",",","output_file",",","num_passes","=","1",")",":","print","(","len","(","examples",")",")","sys",".","stdout",".","flush","(",")","# do not create duplicated records","if","tf",".","gfile",".","Exists","(","output_file",")","and","not","FLAGS",".","overwrite_data",":","tf",".","logging",".","info","(","\"Do not overwrite tfrecord {} exists.\"",".","format","(","output_file",")",")","return","tf",".","logging",".","info","(","\"Create new tfrecord {}.\"",".","format","(","output_file",")",")","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","if","num_passes",">","1",":","examples","*=","num_passes","print","(","len","(","examples",")",")","sys",".","stdout",".","flush","(",")","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example {} of 
{}\"",".","format","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenize_fn",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","def","create_float_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","float_list","=","tf",".","train",".","FloatList","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_float_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","if","label_list","is","not","None",":","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","else",":","features","[","\"label_ids\"","]","=","create_float_feature","(","[","float","(","feature",".","label_id",")","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_classifier.py#L430-L479"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_classifier.py","language":"python","identifier":"file_based_input_fn_builder","parameters":"(input_file, seq_length, is_training,\n drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.float32),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n if FLAGS.is_regression:\n name_to_features[\"label_ids\"] = tf.FixedLenFeature([], tf.float32)\n\n tf.logging.info(\"Input tfrecord file {}\".format(input_file))\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example\n\n def input_fn(params, input_context=None):\n \"\"\"The actual input function.\"\"\"\n if FLAGS.use_tpu:\n batch_size = params[\"batch_size\"]\n 
elif is_training:\n batch_size = FLAGS.train_batch_size\n elif FLAGS.do_eval:\n batch_size = FLAGS.eval_batch_size\n else:\n batch_size = FLAGS.predict_batch_size\n\n d = tf.data.TFRecordDataset(input_file)\n # Shard the dataset to difference devices\n if input_context is not None:\n tf.logging.info(\"Input pipeline id %d out of %d\",\n input_context.input_pipeline_id, input_context.num_replicas_in_sync)\n d = d.shard(input_context.num_input_pipelines,\n input_context.input_pipeline_id)\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = d.shuffle(buffer_size=FLAGS.shuffle_buffer)\n d = d.repeat()\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","file_based_input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","float32",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"label_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"is_real_example\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","}","if","FLAGS",".","is_regression",":","name_to_features","[","\"label_ids\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","float32",")","tf",".","logging",".","info","(","\"Input tfrecord file {}\"",".","format","(","input_file",")",")","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","cast","(","t",",","tf",".","int32",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",",","input_context","=","None",")",":","\"\"\"The actual input function.\"\"\"","if","FLAGS",".","use_tpu",":","batch_size","=","params","[","\"batch_size\"","]","elif","is_training",":","batch_size","=","FLAGS",".","train_batch_size","elif","FLAGS",".","do_eval",":","batch_size","=","FLAGS",".","eval_batch_size","else",":","batch_size","=","FLAGS",".","predict_batch_size","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","# Shard the dataset to difference devices","if","input_context","is","not","None",":","tf",".","logging",".","info","(","\"Input pipeline id %d out of %d\"",",","input_context",".","input_pipeline_id",",","input_context",".","num_replicas_in_sync",")","d","=","d",".","shard","(","input_context",".","num_input_pipelines",",","input_context",".","input_pipeline_id",")","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't 
matter.","if","is_training",":","d","=","d",".","shuffle","(","buffer_size","=","FLAGS",".","shuffle_buffer",")","d","=","d",".","repeat","(",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_classifier.py#L482-L545"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_cmrc_drcd.py","language":"python","identifier":"read_squad_examples","parameters":"(input_file, is_training)","argument_list":"","return_statement":"return examples","docstring":"Read a SQuAD json file into a list of SquadExample.","docstring_summary":"Read a SQuAD json file into a list of SquadExample.","docstring_tokens":["Read","a","SQuAD","json","file","into","a","list","of","SquadExample","."],"function":"def read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n orig_answer_text = None\n is_impossible = False\n\n if is_training:\n if \"is_impossible\" in qa:\n is_impossible = qa[\"is_impossible\"]\n else:\n is_impossible = False\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n start_position = answer[\"answer_start\"]\n else:\n start_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n paragraph_text=paragraph_text,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples","function_tokens":["def","read_squad_examples","(","input_file",",","is_training",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","reader",":","input_data","=","json",".","load","(","reader",")","[","\"data\"","]","examples","=","[","]","for","entry","in","input_data",":","for","paragraph","in","entry","[","\"paragraphs\"","]",":","paragraph_text","=","paragraph","[","\"context\"","]","for","qa","in","paragraph","[","\"qas\"","]",":","qas_id","=","qa","[","\"id\"","]","question_text","=","qa","[","\"question\"","]","start_position","=","None","orig_answer_text","=","None","is_impossible","=","False","if","is_training",":","if","\"is_impossible\"","in","qa",":","is_impossible","=","qa","[","\"is_impossible\"","]","else",":","is_impossible","=","False","if","(","len","(","qa","[","\"answers\"","]",")","!=","1",")","and","(","not","is_impossible",")",":","raise","ValueError","(","\"For training, each question should have exactly 1 
answer.\"",")","if","not","is_impossible",":","answer","=","qa","[","\"answers\"","]","[","0","]","orig_answer_text","=","answer","[","\"text\"","]","start_position","=","answer","[","\"answer_start\"","]","else",":","start_position","=","-","1","orig_answer_text","=","\"\"","example","=","SquadExample","(","qas_id","=","qas_id",",","question_text","=","question_text",",","paragraph_text","=","paragraph_text",",","orig_answer_text","=","orig_answer_text",",","start_position","=","start_position",",","is_impossible","=","is_impossible",")","examples",".","append","(","example",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_cmrc_drcd.py#L233-L275"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_cmrc_drcd.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, sp_model, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn)","argument_list":"","return_statement":"","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, sp_model, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n cnt_pos, cnt_neg = 0, 0\n unique_id = 1000000000\n max_N, max_M = 1024, 1024\n f = np.zeros((max_N, max_M), dtype=np.float32)\n\n for (example_index, example) in enumerate(examples):\n\n if example_index % 100 == 0:\n tf.logging.info('Converting {}\/{} pos {} neg {}'.format(\n example_index, len(examples), cnt_pos, cnt_neg))\n\n query_tokens = encode_ids(\n sp_model,\n preprocess_text(example.question_text, lower=FLAGS.uncased))\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n paragraph_text = example.paragraph_text\n para_tokens = encode_pieces(\n sp_model,\n preprocess_text(example.paragraph_text, lower=FLAGS.uncased))\n\n chartok_to_tok_index = []\n tok_start_to_chartok_index = []\n tok_end_to_chartok_index = []\n char_cnt = 0\n for i, token in enumerate(para_tokens):\n chartok_to_tok_index.extend([i] * len(token))\n tok_start_to_chartok_index.append(char_cnt)\n char_cnt += len(token)\n tok_end_to_chartok_index.append(char_cnt - 1)\n\n tok_cat_text = ''.join(para_tokens).replace(SPIECE_UNDERLINE, ' ')\n N, M = len(paragraph_text), len(tok_cat_text)\n\n if N > max_N or M > max_M:\n max_N = max(N, max_N)\n max_M = max(M, max_M)\n f = np.zeros((max_N, max_M), dtype=np.float32)\n gc.collect()\n\n g = {}\n\n def _lcs_match(max_dist):\n f.fill(0)\n g.clear()\n\n ### longest common sub sequence\n # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))\n for i in range(N):\n\n # note(zhiliny):\n # unlike standard LCS, this is specifically optimized for the setting\n # because the mismatch between sentence pieces and original text will\n # be small\n for j in range(i - max_dist, i + max_dist):\n if j >= M or j < 0: continue\n\n if i > 0:\n g[(i, j)] = 0\n f[i, j] = f[i - 1, j]\n\n if j > 0 and f[i, j - 1] > f[i, j]:\n g[(i, j)] = 1\n f[i, j] = f[i, j - 1]\n\n f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0\n if (preprocess_text(paragraph_text[i], lower=FLAGS.uncased,\n remove_space=False)\n == tok_cat_text[j]\n and f_prev 
+ 1 > f[i, j]):\n g[(i, j)] = 2\n f[i, j] = f_prev + 1\n\n max_dist = abs(N - M) + 5\n for _ in range(2):\n _lcs_match(max_dist)\n if f[N - 1, M - 1] > 0.8 * N: break\n max_dist *= 2\n\n orig_to_chartok_index = [None] * N\n chartok_to_orig_index = [None] * M\n i, j = N - 1, M - 1\n while i >= 0 and j >= 0:\n if (i, j) not in g: break\n if g[(i, j)] == 2:\n orig_to_chartok_index[i] = j\n chartok_to_orig_index[j] = i\n i, j = i - 1, j - 1\n elif g[(i, j)] == 1:\n j = j - 1\n else:\n i = i - 1\n\n if all(v is None for v in orig_to_chartok_index) or f[N - 1, M - 1] < 0.8 * N:\n print('MISMATCH DETECTED!')\n continue\n\n tok_start_to_orig_index = []\n tok_end_to_orig_index = []\n for i in range(len(para_tokens)):\n start_chartok_pos = tok_start_to_chartok_index[i]\n end_chartok_pos = tok_end_to_chartok_index[i]\n start_orig_pos = _convert_index(chartok_to_orig_index, start_chartok_pos,\n N, is_start=True)\n end_orig_pos = _convert_index(chartok_to_orig_index, end_chartok_pos,\n N, is_start=False)\n\n tok_start_to_orig_index.append(start_orig_pos)\n tok_end_to_orig_index.append(end_orig_pos)\n\n if not is_training:\n tok_start_position = tok_end_position = None\n\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n\n if is_training and not example.is_impossible:\n start_position = example.start_position\n end_position = start_position + len(example.orig_answer_text) - 1\n\n start_chartok_pos = _convert_index(orig_to_chartok_index, start_position,\n is_start=True)\n tok_start_position = chartok_to_tok_index[start_chartok_pos]\n\n end_chartok_pos = _convert_index(orig_to_chartok_index, end_position,\n is_start=False)\n tok_end_position = chartok_to_tok_index[end_chartok_pos]\n assert tok_start_position <= tok_end_position\n\n def _piece_to_id(x):\n if six.PY2 and isinstance(x, unicode):\n x = x.encode('utf-8')\n return sp_model.PieceToId(x)\n\n all_doc_tokens = list(map(_piece_to_id, para_tokens))\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_is_max_context = {}\n segment_ids = []\n p_mask = []\n\n cur_tok_start_to_orig_index = []\n cur_tok_end_to_orig_index = []\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n\n cur_tok_start_to_orig_index.append(\n tok_start_to_orig_index[split_token_index])\n cur_tok_end_to_orig_index.append(\n tok_end_to_orig_index[split_token_index])\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(SEG_ID_P)\n p_mask.append(0)\n\n paragraph_len = len(tokens)\n\n tokens.append(SEP_ID)\n segment_ids.append(SEG_ID_P)\n 
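# The alignment above hinges on a band-limited longest common subsequence
# between the raw paragraph (length N) and the concatenated sentence pieces
# (length M): f[i, j] = max(f[i-1, j], f[i, j-1], f[i-1, j-1] + match(i, j)),
# with j restricted to |i - j| < max_dist and the band doubled until
# f[N-1, M-1] > 0.8 * N. Below is a hedged pure-Python sketch of that
# recurrence (no numpy, no backpointers), for intuition only.
def banded_lcs_length(a, b, max_dist):
    n, m = len(a), len(b)
    f = [[0] * m for _ in range(n)]
    for i in range(n):
        for j in range(max(0, i - max_dist), min(m, i + max_dist)):
            best = 0
            if i > 0:
                best = f[i - 1][j]
            if j > 0:
                best = max(best, f[i][j - 1])
            prev = f[i - 1][j - 1] if i > 0 and j > 0 else 0
            if a[i] == b[j]:
                best = max(best, prev + 1)
            f[i][j] = best
    return f[n - 1][m - 1] if n and m else 0

# "abcde" vs "abxde" share the subsequence "abde" inside a band of 3.
assert banded_lcs_length("abcde", "abxde", max_dist=3) == 4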
p_mask.append(1)\n\n # note(zhiliny): we put P before Q\n # because during pretraining, B is always shorter than A\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(SEG_ID_Q)\n p_mask.append(1)\n tokens.append(SEP_ID)\n segment_ids.append(SEG_ID_Q)\n p_mask.append(1)\n\n cls_index = len(segment_ids)\n tokens.append(CLS_ID)\n segment_ids.append(SEG_ID_CLS)\n p_mask.append(0)\n\n input_ids = tokens\n\n # The mask has 0 for real tokens and 1 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(1)\n segment_ids.append(SEG_ID_PAD)\n p_mask.append(1)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(p_mask) == max_seq_length\n\n span_is_impossible = example.is_impossible\n start_position = None\n end_position = None\n if is_training and not span_is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n # continue\n start_position = 0\n end_position = 0\n span_is_impossible = True\n else:\n # note(zhiliny): we put P before Q, so doc_offset should be zero.\n # doc_offset = len(query_tokens) + 2\n doc_offset = 0\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and span_is_impossible:\n start_position = cls_index\n end_position = cls_index\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tok_start_to_orig_index: %s\" % \" \".join(\n [str(x) for x in cur_tok_start_to_orig_index]))\n tf.logging.info(\"tok_end_to_orig_index: %s\" % \" \".join(\n [str(x) for x in cur_tok_end_to_orig_index]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n if is_training and span_is_impossible:\n tf.logging.info(\"impossible example span\")\n\n if is_training and not span_is_impossible:\n pieces = [sp_model.IdToPiece(token) for token in\n tokens[start_position: (end_position + 1)]]\n answer_text = sp_model.DecodePieces(pieces)\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (printable_text(answer_text)))\n\n # note(zhiliny): With multi processing,\n # the example_index is actually the index within the current process\n # therefore we use example_index=None to avoid being used in the future.\n # The current code does not use example_index of training data.\n if is_training:\n feat_example_index = None\n else:\n feat_example_index = example_index\n\n feature = InputFeatures(\n unique_id=unique_id,\n 
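# A hedged sketch of the `_DocSpan` sliding window shown above: documents
# longer than max_tokens_for_doc are cut into overlapping chunks, advancing
# by doc_stride each step; illustrative, not the repository's code.
import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    doc_spans, start_offset = [], 0
    while start_offset < num_tokens:
        length = min(num_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == num_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans

# 10 tokens, windows of 4, stride 2 -> spans starting at 0, 2, 4, 6.
assert [s.start for s in make_doc_spans(10, 4, 2)] == [0, 2, 4, 6]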
example_index=feat_example_index,\n doc_span_index=doc_span_index,\n tok_start_to_orig_index=cur_tok_start_to_orig_index,\n tok_end_to_orig_index=cur_tok_end_to_orig_index,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n p_mask=p_mask,\n segment_ids=segment_ids,\n paragraph_len=paragraph_len,\n cls_index=cls_index,\n start_position=start_position,\n end_position=end_position,\n is_impossible=span_is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n if span_is_impossible:\n cnt_neg += 1\n else:\n cnt_pos += 1\n\n tf.logging.info(\"Total number of instances: {} = pos {} neg {}\".format(\n cnt_pos + cnt_neg, cnt_pos, cnt_neg))","function_tokens":["def","convert_examples_to_features","(","examples",",","sp_model",",","max_seq_length",",","doc_stride",",","max_query_length",",","is_training",",","output_fn",")",":","cnt_pos",",","cnt_neg","=","0",",","0","unique_id","=","1000000000","max_N",",","max_M","=","1024",",","1024","f","=","np",".","zeros","(","(","max_N",",","max_M",")",",","dtype","=","np",".","float32",")","for","(","example_index",",","example",")","in","enumerate","(","examples",")",":","if","example_index","%","100","==","0",":","tf",".","logging",".","info","(","'Converting {}\/{} pos {} neg {}'",".","format","(","example_index",",","len","(","examples",")",",","cnt_pos",",","cnt_neg",")",")","query_tokens","=","encode_ids","(","sp_model",",","preprocess_text","(","example",".","question_text",",","lower","=","FLAGS",".","uncased",")",")","if","len","(","query_tokens",")",">","max_query_length",":","query_tokens","=","query_tokens","[","0",":","max_query_length","]","paragraph_text","=","example",".","paragraph_text","para_tokens","=","encode_pieces","(","sp_model",",","preprocess_text","(","example",".","paragraph_text",",","lower","=","FLAGS",".","uncased",")",")","chartok_to_tok_index","=","[","]","tok_start_to_chartok_index","=","[","]","tok_end_to_chartok_index","=","[","]","char_cnt","=","0","for","i",",","token","in","enumerate","(","para_tokens",")",":","chartok_to_tok_index",".","extend","(","[","i","]","*","len","(","token",")",")","tok_start_to_chartok_index",".","append","(","char_cnt",")","char_cnt","+=","len","(","token",")","tok_end_to_chartok_index",".","append","(","char_cnt","-","1",")","tok_cat_text","=","''",".","join","(","para_tokens",")",".","replace","(","SPIECE_UNDERLINE",",","' '",")","N",",","M","=","len","(","paragraph_text",")",",","len","(","tok_cat_text",")","if","N",">","max_N","or","M",">","max_M",":","max_N","=","max","(","N",",","max_N",")","max_M","=","max","(","M",",","max_M",")","f","=","np",".","zeros","(","(","max_N",",","max_M",")",",","dtype","=","np",".","float32",")","gc",".","collect","(",")","g","=","{","}","def","_lcs_match","(","max_dist",")",":","f",".","fill","(","0",")","g",".","clear","(",")","### longest common sub sequence","# f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j))","for","i","in","range","(","N",")",":","# note(zhiliny):","# unlike standard LCS, this is specifically optimized for the setting","# because the mismatch between sentence pieces and original text will","# be 
small","for","j","in","range","(","i","-","max_dist",",","i","+","max_dist",")",":","if","j",">=","M","or","j","<","0",":","continue","if","i",">","0",":","g","[","(","i",",","j",")","]","=","0","f","[","i",",","j","]","=","f","[","i","-","1",",","j","]","if","j",">","0","and","f","[","i",",","j","-","1","]",">","f","[","i",",","j","]",":","g","[","(","i",",","j",")","]","=","1","f","[","i",",","j","]","=","f","[","i",",","j","-","1","]","f_prev","=","f","[","i","-","1",",","j","-","1","]","if","i",">","0","and","j",">","0","else","0","if","(","preprocess_text","(","paragraph_text","[","i","]",",","lower","=","FLAGS",".","uncased",",","remove_space","=","False",")","==","tok_cat_text","[","j","]","and","f_prev","+","1",">","f","[","i",",","j","]",")",":","g","[","(","i",",","j",")","]","=","2","f","[","i",",","j","]","=","f_prev","+","1","max_dist","=","abs","(","N","-","M",")","+","5","for","_","in","range","(","2",")",":","_lcs_match","(","max_dist",")","if","f","[","N","-","1",",","M","-","1","]",">","0.8","*","N",":","break","max_dist","*=","2","orig_to_chartok_index","=","[","None","]","*","N","chartok_to_orig_index","=","[","None","]","*","M","i",",","j","=","N","-","1",",","M","-","1","while","i",">=","0","and","j",">=","0",":","if","(","i",",","j",")","not","in","g",":","break","if","g","[","(","i",",","j",")","]","==","2",":","orig_to_chartok_index","[","i","]","=","j","chartok_to_orig_index","[","j","]","=","i","i",",","j","=","i","-","1",",","j","-","1","elif","g","[","(","i",",","j",")","]","==","1",":","j","=","j","-","1","else",":","i","=","i","-","1","if","all","(","v","is","None","for","v","in","orig_to_chartok_index",")","or","f","[","N","-","1",",","M","-","1","]","<","0.8","*","N",":","print","(","'MISMATCH DETECTED!'",")","continue","tok_start_to_orig_index","=","[","]","tok_end_to_orig_index","=","[","]","for","i","in","range","(","len","(","para_tokens",")",")",":","start_chartok_pos","=","tok_start_to_chartok_index","[","i","]","end_chartok_pos","=","tok_end_to_chartok_index","[","i","]","start_orig_pos","=","_convert_index","(","chartok_to_orig_index",",","start_chartok_pos",",","N",",","is_start","=","True",")","end_orig_pos","=","_convert_index","(","chartok_to_orig_index",",","end_chartok_pos",",","N",",","is_start","=","False",")","tok_start_to_orig_index",".","append","(","start_orig_pos",")","tok_end_to_orig_index",".","append","(","end_orig_pos",")","if","not","is_training",":","tok_start_position","=","tok_end_position","=","None","if","is_training","and","example",".","is_impossible",":","tok_start_position","=","-","1","tok_end_position","=","-","1","if","is_training","and","not","example",".","is_impossible",":","start_position","=","example",".","start_position","end_position","=","start_position","+","len","(","example",".","orig_answer_text",")","-","1","start_chartok_pos","=","_convert_index","(","orig_to_chartok_index",",","start_position",",","is_start","=","True",")","tok_start_position","=","chartok_to_tok_index","[","start_chartok_pos","]","end_chartok_pos","=","_convert_index","(","orig_to_chartok_index",",","end_position",",","is_start","=","False",")","tok_end_position","=","chartok_to_tok_index","[","end_chartok_pos","]","assert","tok_start_position","<=","tok_end_position","def","_piece_to_id","(","x",")",":","if","six",".","PY2","and","isinstance","(","x",",","unicode",")",":","x","=","x",".","encode","(","'utf-8'",")","return","sp_model",".","PieceToId","(","x",")","all_doc_tokens","=","list","(","map","(","_piece_to_id",",","para_tokens","
)",")","# The -3 accounts for [CLS], [SEP] and [SEP]","max_tokens_for_doc","=","max_seq_length","-","len","(","query_tokens",")","-","3","# We can have documents that are longer than the maximum sequence length.","# To deal with this we do a sliding window approach, where we take chunks","# of the up to our max length with a stride of `doc_stride`.","_DocSpan","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"DocSpan\"",",","[","\"start\"",",","\"length\"","]",")","doc_spans","=","[","]","start_offset","=","0","while","start_offset","<","len","(","all_doc_tokens",")",":","length","=","len","(","all_doc_tokens",")","-","start_offset","if","length",">","max_tokens_for_doc",":","length","=","max_tokens_for_doc","doc_spans",".","append","(","_DocSpan","(","start","=","start_offset",",","length","=","length",")",")","if","start_offset","+","length","==","len","(","all_doc_tokens",")",":","break","start_offset","+=","min","(","length",",","doc_stride",")","for","(","doc_span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","tokens","=","[","]","token_is_max_context","=","{","}","segment_ids","=","[","]","p_mask","=","[","]","cur_tok_start_to_orig_index","=","[","]","cur_tok_end_to_orig_index","=","[","]","for","i","in","range","(","doc_span",".","length",")",":","split_token_index","=","doc_span",".","start","+","i","cur_tok_start_to_orig_index",".","append","(","tok_start_to_orig_index","[","split_token_index","]",")","cur_tok_end_to_orig_index",".","append","(","tok_end_to_orig_index","[","split_token_index","]",")","is_max_context","=","_check_is_max_context","(","doc_spans",",","doc_span_index",",","split_token_index",")","token_is_max_context","[","len","(","tokens",")","]","=","is_max_context","tokens",".","append","(","all_doc_tokens","[","split_token_index","]",")","segment_ids",".","append","(","SEG_ID_P",")","p_mask",".","append","(","0",")","paragraph_len","=","len","(","tokens",")","tokens",".","append","(","SEP_ID",")","segment_ids",".","append","(","SEG_ID_P",")","p_mask",".","append","(","1",")","# note(zhiliny): we put P before Q","# because during pretraining, B is always shorter than A","for","token","in","query_tokens",":","tokens",".","append","(","token",")","segment_ids",".","append","(","SEG_ID_Q",")","p_mask",".","append","(","1",")","tokens",".","append","(","SEP_ID",")","segment_ids",".","append","(","SEG_ID_Q",")","p_mask",".","append","(","1",")","cls_index","=","len","(","segment_ids",")","tokens",".","append","(","CLS_ID",")","segment_ids",".","append","(","SEG_ID_CLS",")","p_mask",".","append","(","0",")","input_ids","=","tokens","# The mask has 0 for real tokens and 1 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","0","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","1",")","segment_ids",".","append","(","SEG_ID_PAD",")","p_mask",".","append","(","1",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","assert","len","(","p_mask",")","==","max_seq_length","span_is_impossible","=","example",".","is_impossible","start_position","=","None","end_position","=","None","if","is_training","and","not","span_is_impossible",":","# For training, if our document chunk does not contain an annotation","# we throw it out, since there is nothing to predict.","doc_start","=","doc_span",".","start","doc_end","=","doc_span",".","start","+","doc_span",".","length","-","1","out_of_span","=","False","if","not","(","tok_start_position",">=","doc_start","and","tok_end_position","<=","doc_end",")",":","out_of_span","=","True","if","out_of_span",":","# continue","start_position","=","0","end_position","=","0","span_is_impossible","=","True","else",":","# note(zhiliny): we put P before Q, so doc_offset should be zero.","# doc_offset = len(query_tokens) + 2","doc_offset","=","0","start_position","=","tok_start_position","-","doc_start","+","doc_offset","end_position","=","tok_end_position","-","doc_start","+","doc_offset","if","is_training","and","span_is_impossible",":","start_position","=","cls_index","end_position","=","cls_index","if","example_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","unique_id",")",")","tf",".","logging",".","info","(","\"example_index: %s\"","%","(","example_index",")",")","tf",".","logging",".","info","(","\"doc_span_index: %s\"","%","(","doc_span_index",")",")","tf",".","logging",".","info","(","\"tok_start_to_orig_index: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","cur_tok_start_to_orig_index","]",")",")","tf",".","logging",".","info","(","\"tok_end_to_orig_index: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","cur_tok_end_to_orig_index","]",")",")","tf",".","logging",".","info","(","\"token_is_max_context: %s\"","%","\" \"",".","join","(","[","\"%d:%s\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_is_max_context",")","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","if","is_training","and","span_is_impossible",":","tf",".","logging",".","info","(","\"impossible example span\"",")","if","is_training","and","not","span_is_impossible",":","pieces","=","[","sp_model",".","IdToPiece","(","token",")","for","token","in","tokens","[","start_position",":","(","end_position","+","1",")","]","]","answer_text","=","sp_model",".","DecodePieces","(","pieces",")","tf",".","logging",".","info","(","\"start_position: %d\"","%","(","start_position",")",")","tf",".","logging",".","info","(","\"end_position: 
%d\"","%","(","end_position",")",")","tf",".","logging",".","info","(","\"answer: %s\"","%","(","printable_text","(","answer_text",")",")",")","# note(zhiliny): With multi processing,","# the example_index is actually the index within the current process","# therefore we use example_index=None to avoid being used in the future.","# The current code does not use example_index of training data.","if","is_training",":","feat_example_index","=","None","else",":","feat_example_index","=","example_index","feature","=","InputFeatures","(","unique_id","=","unique_id",",","example_index","=","feat_example_index",",","doc_span_index","=","doc_span_index",",","tok_start_to_orig_index","=","cur_tok_start_to_orig_index",",","tok_end_to_orig_index","=","cur_tok_end_to_orig_index",",","token_is_max_context","=","token_is_max_context",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","p_mask","=","p_mask",",","segment_ids","=","segment_ids",",","paragraph_len","=","paragraph_len",",","cls_index","=","cls_index",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","span_is_impossible",")","# Run callback","output_fn","(","feature",")","unique_id","+=","1","if","span_is_impossible",":","cnt_neg","+=","1","else",":","cnt_pos","+=","1","tf",".","logging",".","info","(","\"Total number of instances: {} = pos {} neg {}\"",".","format","(","cnt_pos","+","cnt_neg",",","cnt_pos",",","cnt_neg",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_cmrc_drcd.py#L317-L637"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_cmrc_drcd.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. 
We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_cmrc_drcd.py#L640-L674"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_cmrc_drcd.py","language":"python","identifier":"write_predictions","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, output_prediction_file,\n output_nbest_file,\n orig_data)","argument_list":"","return_statement":"return out_eval","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, output_prediction_file,\n output_nbest_file,\n orig_data):\n \"\"\"Write final predictions to the json file 
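# A standalone sketch mirroring `_check_is_max_context` above: a token that
# falls into several overlapping doc spans is scored by min(left context,
# right context) plus a small bonus for longer spans, and only the
# best-scoring span keeps it. Illustrative, not the repository's code.
import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def check_is_max_context(doc_spans, cur_span_index, position):
    best_score, best_span_index = None, None
    for span_index, doc_span in enumerate(doc_spans):
        end = doc_span.start + doc_span.length - 1
        if position < doc_span.start or position > end:
            continue
        num_left = position - doc_span.start
        num_right = end - position
        score = min(num_left, num_right) + 0.01 * doc_span.length
        if best_score is None or score > best_score:
            best_score, best_span_index = score, span_index
    return cur_span_index == best_span_index

# With spans [0..3] and [2..5], token 3 has the more balanced context in the
# second span, so only that span treats it as "max context".
spans = [DocSpan(start=0, length=4), DocSpan(start=2, length=4)]
assert not check_is_max_context(spans, 0, 3)
assert check_is_max_context(spans, 1, 3)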
and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n # tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n\n for i in range(FLAGS.start_n_top):\n for j in range(FLAGS.end_n_top):\n start_log_prob = result.start_top_log_probs[i]\n start_index = result.start_top_index[i]\n\n j_index = i * FLAGS.end_n_top + j\n\n end_log_prob = result.end_top_log_probs[j_index]\n end_index = result.end_top_index[j_index]\n\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= feature.paragraph_len - 1:\n continue\n if end_index >= feature.paragraph_len - 1:\n continue\n\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_log_prob=start_log_prob,\n end_log_prob=end_log_prob))\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_log_prob + x.end_log_prob),\n reverse=True)\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n\n tok_start_to_orig_index = feature.tok_start_to_orig_index\n tok_end_to_orig_index = feature.tok_end_to_orig_index\n start_orig_pos = tok_start_to_orig_index[pred.start_index]\n end_orig_pos = tok_end_to_orig_index[pred.end_index]\n\n paragraph_text = example.paragraph_text\n final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()\n\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_log_prob=pred.start_log_prob,\n end_log_prob=pred.end_log_prob))\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"\", start_log_prob=-1e6,\n end_log_prob=-1e6))\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_log_prob + entry.end_log_prob)\n if not best_non_null_entry:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_log_prob\"] = entry.start_log_prob\n output[\"end_log_prob\"] = entry.end_log_prob\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n assert best_non_null_entry is not None\n\n score_diff = 0 #score_null\n scores_diff_json[example.qas_id] = score_diff\n # note(zhiliny): always predict best_non_null_entry\n # and the evaluation script will search for the best threshold\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n qid_to_has_ans = squad_utils.make_qid_to_has_ans(orig_data)\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\n exact_raw, f1_raw = squad_utils.get_raw_scores(orig_data, all_predictions)\n out_eval = {}\n\n squad_utils.find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw,\n scores_diff_json, qid_to_has_ans)\n\n return out_eval","function_tokens":["def","write_predictions","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","output_prediction_file",",","output_nbest_file",",","orig_data",")",":","tf",".","logging",".","info","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","# tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature",".","example_index","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","all_examples",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature",".","unique_id","]","for","i","in","range","(","FLAGS",".","start_n_top",")",":","for","j","in","range","(","FLAGS",".","end_n_top",")",":","start_log_prob","=","result",".","start_top_log_probs","[","i","]","start_index","=","result",".","start_top_index","[","i","]","j_index","=","i","*","FLAGS",".","end_n_top","+","j","end_log_prob","=","result",".","end_top_log_probs","[","j_index","]","end_index","=","result",".","end_top_index","[","j_index","]","# 
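# A hedged sketch of the candidate-span enumeration used by
# `write_predictions` above: every (top-k start, top-k end) pair becomes a
# candidate, obviously invalid spans are dropped, and survivors are ranked by
# the sum of start and end log-probabilities. Plain Python; the input values
# are illustrative placeholders.
def enumerate_spans(start_top, end_top, paragraph_len, max_answer_length):
    # start_top / end_top: lists of (token_index, log_prob) pairs.
    candidates = []
    for start_index, start_lp in start_top:
        for end_index, end_lp in end_top:
            if start_index >= paragraph_len - 1 or end_index >= paragraph_len - 1:
                continue  # span would leak outside the paragraph tokens
            if end_index < start_index:
                continue  # end before start is never a valid answer
            if end_index - start_index + 1 > max_answer_length:
                continue  # overly long answers are discarded
            candidates.append((start_index, end_index, start_lp + end_lp))
    return sorted(candidates, key=lambda c: c[2], reverse=True)

best = enumerate_spans([(2, -0.1), (5, -1.0)], [(4, -0.2), (1, -0.5)],
                       paragraph_len=10, max_answer_length=8)
assert best[0][:2] == (2, 4)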
We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. We throw out all","# invalid predictions.","if","start_index",">=","feature",".","paragraph_len","-","1",":","continue","if","end_index",">=","feature",".","paragraph_len","-","1",":","continue","if","not","feature",".","token_is_max_context",".","get","(","start_index",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_log_prob","=","start_log_prob",",","end_log_prob","=","end_log_prob",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_log_prob","+","x",".","end_log_prob",")",",","reverse","=","True",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","tok_start_to_orig_index","=","feature",".","tok_start_to_orig_index","tok_end_to_orig_index","=","feature",".","tok_end_to_orig_index","start_orig_pos","=","tok_start_to_orig_index","[","pred",".","start_index","]","end_orig_pos","=","tok_end_to_orig_index","[","pred",".","end_index","]","paragraph_text","=","example",".","paragraph_text","final_text","=","paragraph_text","[","start_orig_pos",":","end_orig_pos","+","1","]",".","strip","(",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_log_prob","=","pred",".","start_log_prob",",","end_log_prob","=","pred",".","end_log_prob",")",")","# In very rare edge cases we could have no valid predictions. 
So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_log_prob","=","-","1e6",",","end_log_prob","=","-","1e6",")",")","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_log_prob","+","entry",".","end_log_prob",")","if","not","best_non_null_entry",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","probs","[","i","]","output","[","\"start_log_prob\"","]","=","entry",".","start_log_prob","output","[","\"end_log_prob\"","]","=","entry",".","end_log_prob","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","assert","best_non_null_entry","is","not","None","score_diff","=","0","#score_null","scores_diff_json","[","example",".","qas_id","]","=","score_diff","# note(zhiliny): always predict best_non_null_entry","# and the evaluation script will search for the best threshold","all_predictions","[","example",".","qas_id","]","=","best_non_null_entry",".","text","all_nbest_json","[","example",".","qas_id","]","=","nbest_json","with","tf",".","gfile",".","GFile","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",")","+","\"\\n\"",")","with","tf",".","gfile",".","GFile","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",")","+","\"\\n\"",")","qid_to_has_ans","=","squad_utils",".","make_qid_to_has_ans","(","orig_data",")","has_ans_qids","=","[","k","for","k",",","v","in","qid_to_has_ans",".","items","(",")","if","v","]","no_ans_qids","=","[","k","for","k",",","v","in","qid_to_has_ans",".","items","(",")","if","not","v","]","exact_raw",",","f1_raw","=","squad_utils",".","get_raw_scores","(","orig_data",",","all_predictions",")","out_eval","=","{","}","squad_utils",".","find_all_best_thresh_v2","(","out_eval",",","all_predictions",",","exact_raw",",","f1_raw",",","scores_diff_json",",","qid_to_has_ans",")","return","out_eval"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_cmrc_drcd.py#L735-L879"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_cmrc_drcd.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return 
best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_cmrc_drcd.py#L882-L891"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_cmrc_drcd.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_cmrc_drcd.py#L894-L914"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_cmrc_drcd.py","language":"python","identifier":"input_fn_builder","parameters":"(input_glob, seq_length, is_training, drop_remainder,\n num_hosts, num_threads=8)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_glob, seq_length, is_training, drop_remainder,\n num_hosts, num_threads=8):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.float32),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"cls_index\": tf.FixedLenFeature([], tf.int64),\n \"p_mask\": tf.FixedLenFeature([seq_length], tf.float32)\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n 
name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"is_impossible\"] = tf.FixedLenFeature([], tf.float32)\n\n tf.logging.info(\"Input tfrecord file glob {}\".format(input_glob))\n global_input_paths = tf.gfile.Glob(input_glob)\n tf.logging.info(\"Find {} input paths {}\".format(\n len(global_input_paths), global_input_paths))\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n if FLAGS.use_tpu:\n batch_size = params[\"batch_size\"]\n elif is_training:\n batch_size = FLAGS.train_batch_size\n else:\n batch_size = FLAGS.predict_batch_size\n\n # Split tfrecords across hosts\n if num_hosts > 1:\n host_id = params[\"context\"].current_host\n num_files = len(global_input_paths)\n if num_files >= num_hosts:\n num_files_per_host = (num_files + num_hosts - 1) \/\/ num_hosts\n my_start_file_id = host_id * num_files_per_host\n my_end_file_id = min((host_id + 1) * num_files_per_host, num_files)\n input_paths = global_input_paths[my_start_file_id: my_end_file_id]\n tf.logging.info(\"Host {} handles {} files\".format(host_id,\n len(input_paths)))\n else:\n input_paths = global_input_paths\n\n if len(input_paths) == 1:\n d = tf.data.TFRecordDataset(input_paths[0])\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = d.shuffle(buffer_size=FLAGS.shuffle_buffer)\n d = d.repeat()\n else:\n d = tf.data.Dataset.from_tensor_slices(input_paths)\n # file level shuffle\n d = d.shuffle(len(input_paths)).repeat()\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_threads, len(input_paths))\n\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n\n if is_training:\n # sample level shuffle\n d = d.shuffle(buffer_size=FLAGS.shuffle_buffer)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_threads,\n drop_remainder=drop_remainder))\n d = d.prefetch(1024)\n\n return d\n\n return 
input_fn","function_tokens":["def","input_fn_builder","(","input_glob",",","seq_length",",","is_training",",","drop_remainder",",","num_hosts",",","num_threads","=","8",")",":","name_to_features","=","{","\"unique_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","float32",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"cls_index\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"p_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","float32",")","}","if","is_training",":","name_to_features","[","\"start_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","name_to_features","[","\"end_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","name_to_features","[","\"is_impossible\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","float32",")","tf",".","logging",".","info","(","\"Input tfrecord file glob {}\"",".","format","(","input_glob",")",")","global_input_paths","=","tf",".","gfile",".","Glob","(","input_glob",")","tf",".","logging",".","info","(","\"Find {} input paths {}\"",".","format","(","len","(","global_input_paths",")",",","global_input_paths",")",")","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","cast","(","t",",","tf",".","int32",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","if","FLAGS",".","use_tpu",":","batch_size","=","params","[","\"batch_size\"","]","elif","is_training",":","batch_size","=","FLAGS",".","train_batch_size","else",":","batch_size","=","FLAGS",".","predict_batch_size","# Split tfrecords across hosts","if","num_hosts",">","1",":","host_id","=","params","[","\"context\"","]",".","current_host","num_files","=","len","(","global_input_paths",")","if","num_files",">=","num_hosts",":","num_files_per_host","=","(","num_files","+","num_hosts","-","1",")","\/\/","num_hosts","my_start_file_id","=","host_id","*","num_files_per_host","my_end_file_id","=","min","(","(","host_id","+","1",")","*","num_files_per_host",",","num_files",")","input_paths","=","global_input_paths","[","my_start_file_id",":","my_end_file_id","]","tf",".","logging",".","info","(","\"Host {} handles {} files\"",".","format","(","host_id",",","len","(","input_paths",")",")",")","else",":","input_paths","=","global_input_paths","if","len","(","input_paths",")","==","1",":","d","=","tf",".","data",".","TFRecordDataset","(","input_paths","[","0","]",")","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't 
matter.","if","is_training",":","d","=","d",".","shuffle","(","buffer_size","=","FLAGS",".","shuffle_buffer",")","d","=","d",".","repeat","(",")","else",":","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","input_paths",")","# file level shuffle","d","=","d",".","shuffle","(","len","(","input_paths",")",")",".","repeat","(",")","# `cycle_length` is the number of parallel files that get read.","cycle_length","=","min","(","num_threads",",","len","(","input_paths",")",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","parallel_interleave","(","tf",".","data",".","TFRecordDataset",",","sloppy","=","is_training",",","cycle_length","=","cycle_length",")",")","if","is_training",":","# sample level shuffle","d","=","d",".","shuffle","(","buffer_size","=","FLAGS",".","shuffle_buffer",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","num_parallel_batches","=","num_threads",",","drop_remainder","=","drop_remainder",")",")","d","=","d",".","prefetch","(","1024",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_cmrc_drcd.py#L917-L1012"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/run_cmrc_drcd.py","language":"python","identifier":"FeatureWriter.process_feature","parameters":"(self, feature)","argument_list":"","return_statement":"","docstring":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_summary":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_tokens":["Write","a","InputFeature","to","the","TFRecordWriter","as","a","tf",".","train",".","Example","."],"function":"def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n def create_float_feature(values):\n f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_float_feature(feature.input_mask)\n features[\"p_mask\"] = create_float_feature(feature.p_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n features[\"cls_index\"] = create_int_feature([feature.cls_index])\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_float_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n 
self._writer.write(tf_example.SerializeToString())","function_tokens":["def","process_feature","(","self",",","feature",")",":","self",".","num_features","+=","1","def","create_int_feature","(","values",")",":","feature","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","feature","def","create_float_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","float_list","=","tf",".","train",".","FloatList","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"unique_ids\"","]","=","create_int_feature","(","[","feature",".","unique_id","]",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_float_feature","(","feature",".","input_mask",")","features","[","\"p_mask\"","]","=","create_float_feature","(","feature",".","p_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"cls_index\"","]","=","create_int_feature","(","[","feature",".","cls_index","]",")","if","self",".","is_training",":","features","[","\"start_positions\"","]","=","create_int_feature","(","[","feature",".","start_position","]",")","features","[","\"end_positions\"","]","=","create_int_feature","(","[","feature",".","end_position","]",")","impossible","=","0","if","feature",".","is_impossible",":","impossible","=","1","features","[","\"is_impossible\"","]","=","create_float_feature","(","[","impossible","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","self",".","_writer",".","write","(","tf_example",".","SerializeToString","(",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/run_cmrc_drcd.py#L686-L717"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/data_utils.py","language":"python","identifier":"format_filename","parameters":"(prefix, bsz_per_host, seq_len, bi_data, suffix,\n mask_alpha=5, mask_beta=1, reuse_len=None, uncased=False,\n fixed_num_predict=None)","argument_list":"","return_statement":"return file_name","docstring":"docs.","docstring_summary":"docs.","docstring_tokens":["docs","."],"function":"def format_filename(prefix, bsz_per_host, seq_len, bi_data, suffix,\n mask_alpha=5, mask_beta=1, reuse_len=None, uncased=False,\n fixed_num_predict=None):\n \"\"\"docs.\"\"\"\n if reuse_len is None:\n reuse_len_str = \"\"\n else:\n reuse_len_str = \"reuse-{}.\".format(reuse_len)\n if not uncased:\n uncased_str = \"\"\n else:\n uncased_str = \"uncased.\"\n if bi_data:\n bi_data_str = \"bi\"\n else:\n bi_data_str = \"uni\"\n if fixed_num_predict is not None:\n fnp_str = \"fnp-{}.\".format(fixed_num_predict)\n else:\n fnp_str = \"\"\n\n file_name = \"{}.bsz-{}.seqlen-{}.{}{}{}.alpha-{}.beta-{}.{}{}\".format(\n prefix, bsz_per_host, seq_len, reuse_len_str, uncased_str, bi_data_str,\n mask_alpha, mask_beta, fnp_str, suffix)\n\n return 
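Editor's sketch of the tf.train.Example construction used by `process_feature` above, with dummy values (requires tensorflow installed; `tf.train.Feature`/`Int64List`/`FloatList` are stable across TF 1.x and 2.x).

import collections
import tensorflow as tf

def create_int_feature(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

def create_float_feature(values):
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))

features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([1000001])          # dummy id
features["input_ids"] = create_int_feature([3, 17, 24, 0])      # dummy token ids
features["input_mask"] = create_float_feature([0., 0., 0., 1.]) # float mask, per the spec above
features["is_impossible"] = create_float_feature([0.])          # bool lowered to 0/1 float

tf_example = tf.train.Example(features=tf.train.Features(feature=features))
serialized = tf_example.SerializeToString()  # bytes, ready for a TFRecordWriter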
file_name","function_tokens":["def","format_filename","(","prefix",",","bsz_per_host",",","seq_len",",","bi_data",",","suffix",",","mask_alpha","=","5",",","mask_beta","=","1",",","reuse_len","=","None",",","uncased","=","False",",","fixed_num_predict","=","None",")",":","if","reuse_len","is","None",":","reuse_len_str","=","\"\"","else",":","reuse_len_str","=","\"reuse-{}.\"",".","format","(","reuse_len",")","if","not","uncased",":","uncased_str","=","\"\"","else",":","uncased_str","=","\"uncased.\"","if","bi_data",":","bi_data_str","=","\"bi\"","else",":","bi_data_str","=","\"uni\"","if","fixed_num_predict","is","not","None",":","fnp_str","=","\"fnp-{}.\"",".","format","(","fixed_num_predict",")","else",":","fnp_str","=","\"\"","file_name","=","\"{}.bsz-{}.seqlen-{}.{}{}{}.alpha-{}.beta-{}.{}{}\"",".","format","(","prefix",",","bsz_per_host",",","seq_len",",","reuse_len_str",",","uncased_str",",","bi_data_str",",","mask_alpha",",","mask_beta",",","fnp_str",",","suffix",")","return","file_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/data_utils.py#L51-L76"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/data_utils.py","language":"python","identifier":"_split_a_and_b","parameters":"(data, sent_ids, begin_idx, tot_len, extend_target=False)","argument_list":"","return_statement":"return ret","docstring":"Split two segments from `data` starting from the index `begin_idx`.","docstring_summary":"Split two segments from `data` starting from the index `begin_idx`.","docstring_tokens":["Split","two","segments","from","data","starting","from","the","index","begin_idx","."],"function":"def _split_a_and_b(data, sent_ids, begin_idx, tot_len, extend_target=False):\n \"\"\"Split two segments from `data` starting from the index `begin_idx`.\"\"\"\n\n data_len = data.shape[0]\n if begin_idx + tot_len >= data_len:\n tf.logging.info(\"[_split_a_and_b] returns None: \"\n \"begin_idx %d + tot_len %d >= data_len %d\",\n begin_idx, tot_len, data_len)\n return None\n\n end_idx = begin_idx + 1\n cut_points = []\n while end_idx < data_len:\n if sent_ids[end_idx] != sent_ids[end_idx - 1]:\n if end_idx - begin_idx >= tot_len: break\n cut_points.append(end_idx)\n end_idx += 1\n\n a_begin = begin_idx\n if len(cut_points) == 0 or random.random() < 0.5:\n label = 0\n if len(cut_points) == 0:\n a_end = end_idx\n else:\n a_end = random.choice(cut_points)\n\n b_len = max(1, tot_len - (a_end - a_begin))\n # (zihang): `data_len - 1` to account for extend_target\n b_begin = random.randint(0, data_len - 1 - b_len)\n b_end = b_begin + b_len\n while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]:\n b_begin -= 1\n # (zihang): `data_len - 1` to account for extend_target\n while b_end < data_len - 1 and sent_ids[b_end - 1] == sent_ids[b_end]:\n b_end += 1\n\n new_begin = a_end\n else:\n label = 1\n a_end = random.choice(cut_points)\n b_begin = a_end\n b_end = end_idx\n\n new_begin = b_end\n\n while a_end - a_begin + b_end - b_begin > tot_len:\n if a_end - a_begin > b_end - b_begin:\n # delete the right side only for the LM objective\n a_end -= 1\n else:\n b_end -= 1\n\n ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin]\n\n if extend_target:\n if a_end >= data_len or b_end >= data_len:\n tf.logging.info(\"[_split_a_and_b] returns None: \"\n \"a_end %d or b_end %d >= data_len %d\",\n a_end, b_end, data_len)\n return None\n a_target = data[a_begin + 1: a_end 
+ 1]\n b_target = data[b_begin: b_end + 1]\n ret.extend([a_target, b_target])\n\n return ret","function_tokens":["def","_split_a_and_b","(","data",",","sent_ids",",","begin_idx",",","tot_len",",","extend_target","=","False",")",":","data_len","=","data",".","shape","[","0","]","if","begin_idx","+","tot_len",">=","data_len",":","tf",".","logging",".","info","(","\"[_split_a_and_b] returns None: \"","\"begin_idx %d + tot_len %d >= data_len %d\"",",","begin_idx",",","tot_len",",","data_len",")","return","None","end_idx","=","begin_idx","+","1","cut_points","=","[","]","while","end_idx","<","data_len",":","if","sent_ids","[","end_idx","]","!=","sent_ids","[","end_idx","-","1","]",":","if","end_idx","-","begin_idx",">=","tot_len",":","break","cut_points",".","append","(","end_idx",")","end_idx","+=","1","a_begin","=","begin_idx","if","len","(","cut_points",")","==","0","or","random",".","random","(",")","<","0.5",":","label","=","0","if","len","(","cut_points",")","==","0",":","a_end","=","end_idx","else",":","a_end","=","random",".","choice","(","cut_points",")","b_len","=","max","(","1",",","tot_len","-","(","a_end","-","a_begin",")",")","# (zihang): `data_len - 1` to account for extend_target","b_begin","=","random",".","randint","(","0",",","data_len","-","1","-","b_len",")","b_end","=","b_begin","+","b_len","while","b_begin",">","0","and","sent_ids","[","b_begin","-","1","]","==","sent_ids","[","b_begin","]",":","b_begin","-=","1","# (zihang): `data_len - 1` to account for extend_target","while","b_end","<","data_len","-","1","and","sent_ids","[","b_end","-","1","]","==","sent_ids","[","b_end","]",":","b_end","+=","1","new_begin","=","a_end","else",":","label","=","1","a_end","=","random",".","choice","(","cut_points",")","b_begin","=","a_end","b_end","=","end_idx","new_begin","=","b_end","while","a_end","-","a_begin","+","b_end","-","b_begin",">","tot_len",":","if","a_end","-","a_begin",">","b_end","-","b_begin",":","# delete the right side only for the LM objective","a_end","-=","1","else",":","b_end","-=","1","ret","=","[","data","[","a_begin",":","a_end","]",",","data","[","b_begin",":","b_end","]",",","label",",","new_begin","]","if","extend_target",":","if","a_end",">=","data_len","or","b_end",">=","data_len",":","tf",".","logging",".","info","(","\"[_split_a_and_b] returns None: \"","\"a_end %d or b_end %d >= data_len %d\"",",","a_end",",","b_end",",","data_len",")","return","None","a_target","=","data","[","a_begin","+","1",":","a_end","+","1","]","b_target","=","data","[","b_begin",":","b_end","+","1","]","ret",".","extend","(","[","a_target",",","b_target","]",")","return","ret"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/data_utils.py#L255-L319"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/data_utils.py","language":"python","identifier":"_sample_mask","parameters":"(sp, seg, reverse=False, max_gram=5, goal_num_predict=None)","argument_list":"","return_statement":"return mask","docstring":"Sample `goal_num_predict` tokens for partial prediction.\n About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens.","docstring_summary":"Sample `goal_num_predict` tokens for partial prediction.\n About `mask_beta` tokens are chosen in a context of `mask_alpha` 
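Editor's sketch of the essence of `_split_a_and_b` above: cut points are the indices where `sent_ids` flips, i.e. sentence starts; label 1 means segment B is the true continuation of A, label 0 means B was sampled from a random position. The toy arrays below are illustrative and skip the record's tot_len bookkeeping.

import numpy as np

data = np.arange(12)                        # toy token ids
sent_ids = np.array([0]*4 + [1]*5 + [0]*3)  # sentence id flips at boundaries

cut_points = [i for i in range(1, len(sent_ids))
              if sent_ids[i] != sent_ids[i - 1]]
print(cut_points)   # [4, 9]

# label == 1: segment B directly follows segment A (real next segment)
# label == 0: segment B is drawn from a random offset elsewhere in `data`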
tokens.","docstring_tokens":["Sample","goal_num_predict","tokens","for","partial","prediction",".","About","mask_beta","tokens","are","chosen","in","a","context","of","mask_alpha","tokens","."],"function":"def _sample_mask(sp, seg, reverse=False, max_gram=5, goal_num_predict=None):\n \"\"\"Sample `goal_num_predict` tokens for partial prediction.\n About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens.\"\"\"\n\n seg_len = len(seg)\n mask = np.array([False] * seg_len, dtype=np.bool)\n\n num_predict = 0\n\n ngrams = np.arange(1, max_gram + 1, dtype=np.int64)\n pvals = 1. \/ np.arange(1, max_gram + 1)\n pvals \/= pvals.sum(keepdims=True)\n\n if reverse:\n seg = np.flip(seg, 0)\n\n cur_len = 0\n while cur_len < seg_len:\n if goal_num_predict is not None and num_predict >= goal_num_predict: break\n\n n = np.random.choice(ngrams, p=pvals)\n if goal_num_predict is not None:\n n = min(n, goal_num_predict - num_predict)\n ctx_size = (n * FLAGS.mask_alpha) \/\/ FLAGS.mask_beta\n l_ctx = np.random.choice(ctx_size)\n r_ctx = ctx_size - l_ctx\n\n # Find the start position of a complete token\n beg = cur_len + l_ctx\n while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())):\n beg += 1\n if beg >= seg_len:\n break\n\n # Find the end position of the n-gram (start pos of the n+1-th gram)\n end = beg + 1\n cnt_ngram = 1\n while end < seg_len:\n if _is_start_piece(sp.IdToPiece(seg[beg].item())):\n cnt_ngram += 1\n if cnt_ngram > n:\n break\n end += 1\n if end >= seg_len:\n break\n\n # Update\n mask[beg:end] = True\n num_predict += end - beg\n\n cur_len = end + r_ctx\n\n while goal_num_predict is not None and num_predict < goal_num_predict:\n i = np.random.randint(seg_len)\n if not mask[i]:\n mask[i] = True\n num_predict += 1\n\n if reverse:\n mask = np.flip(mask, 0)\n\n return mask","function_tokens":["def","_sample_mask","(","sp",",","seg",",","reverse","=","False",",","max_gram","=","5",",","goal_num_predict","=","None",")",":","seg_len","=","len","(","seg",")","mask","=","np",".","array","(","[","False","]","*","seg_len",",","dtype","=","np",".","bool",")","num_predict","=","0","ngrams","=","np",".","arange","(","1",",","max_gram","+","1",",","dtype","=","np",".","int64",")","pvals","=","1.","\/","np",".","arange","(","1",",","max_gram","+","1",")","pvals","\/=","pvals",".","sum","(","keepdims","=","True",")","if","reverse",":","seg","=","np",".","flip","(","seg",",","0",")","cur_len","=","0","while","cur_len","<","seg_len",":","if","goal_num_predict","is","not","None","and","num_predict",">=","goal_num_predict",":","break","n","=","np",".","random",".","choice","(","ngrams",",","p","=","pvals",")","if","goal_num_predict","is","not","None",":","n","=","min","(","n",",","goal_num_predict","-","num_predict",")","ctx_size","=","(","n","*","FLAGS",".","mask_alpha",")","\/\/","FLAGS",".","mask_beta","l_ctx","=","np",".","random",".","choice","(","ctx_size",")","r_ctx","=","ctx_size","-","l_ctx","# Find the start position of a complete token","beg","=","cur_len","+","l_ctx","while","beg","<","seg_len","and","not","_is_start_piece","(","sp",".","IdToPiece","(","seg","[","beg","]",".","item","(",")",")",")",":","beg","+=","1","if","beg",">=","seg_len",":","break","# Find the end position of the n-gram (start pos of the n+1-th 
gram)","end","=","beg","+","1","cnt_ngram","=","1","while","end","<","seg_len",":","if","_is_start_piece","(","sp",".","IdToPiece","(","seg","[","beg","]",".","item","(",")",")",")",":","cnt_ngram","+=","1","if","cnt_ngram",">","n",":","break","end","+=","1","if","end",">=","seg_len",":","break","# Update","mask","[","beg",":","end","]","=","True","num_predict","+=","end","-","beg","cur_len","=","end","+","r_ctx","while","goal_num_predict","is","not","None","and","num_predict","<","goal_num_predict",":","i","=","np",".","random",".","randint","(","seg_len",")","if","not","mask","[","i","]",":","mask","[","i","]","=","True","num_predict","+=","1","if","reverse",":","mask","=","np",".","flip","(","mask",",","0",")","return","mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/data_utils.py#L331-L392"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/data_utils.py","language":"python","identifier":"_convert_example","parameters":"(example, use_bfloat16)","argument_list":"","return_statement":"","docstring":"Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.","docstring_summary":"Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.","docstring_tokens":["Cast","int64","into","int32","and","float32","to","bfloat16","if","use_bfloat16","."],"function":"def _convert_example(example, use_bfloat16):\n \"\"\"Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.\"\"\"\n for key in list(example.keys()):\n val = example[key]\n if tf.keras.backend.is_sparse(val):\n val = tf.sparse.to_dense(val)\n if val.dtype == tf.int64:\n val = tf.cast(val, tf.int32)\n if use_bfloat16 and val.dtype == tf.float32:\n val = tf.cast(val, tf.bfloat16)\n\n example[key] = val","function_tokens":["def","_convert_example","(","example",",","use_bfloat16",")",":","for","key","in","list","(","example",".","keys","(",")",")",":","val","=","example","[","key","]","if","tf",".","keras",".","backend",".","is_sparse","(","val",")",":","val","=","tf",".","sparse",".","to_dense","(","val",")","if","val",".","dtype","==","tf",".","int64",":","val","=","tf",".","cast","(","val",",","tf",".","int32",")","if","use_bfloat16","and","val",".","dtype","==","tf",".","float32",":","val","=","tf",".","cast","(","val",",","tf",".","bfloat16",")","example","[","key","]","=","val"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/data_utils.py#L531-L542"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/data_utils.py","language":"python","identifier":"_local_perm","parameters":"(inputs, targets, is_masked, perm_size, seq_len)","argument_list":"","return_statement":"return perm_mask, new_targets, target_mask, inputs_k, inputs_q","docstring":"Sample a permutation of the factorization order, and create an\n attention mask accordingly.\n\n Args:\n inputs: int64 Tensor in shape [seq_len], input ids.\n targets: int64 Tensor in shape [seq_len], target ids.\n is_masked: bool Tensor in shape [seq_len]. True means being selected\n for partial prediction.\n perm_size: the length of longest permutation. 
Could be set to be reuse_len.\n Should not be larger than reuse_len or there will be data leaks.\n seq_len: int, sequence length.","docstring_summary":"Sample a permutation of the factorization order, and create an\n attention mask accordingly.","docstring_tokens":["Sample","a","permutation","of","the","factorization","order","and","create","an","attention","mask","accordingly","."],"function":"def _local_perm(inputs, targets, is_masked, perm_size, seq_len):\n \"\"\"\n Sample a permutation of the factorization order, and create an\n attention mask accordingly.\n\n Args:\n inputs: int64 Tensor in shape [seq_len], input ids.\n targets: int64 Tensor in shape [seq_len], target ids.\n is_masked: bool Tensor in shape [seq_len]. True means being selected\n for partial prediction.\n perm_size: the length of longest permutation. Could be set to be reuse_len.\n Should not be larger than reuse_len or there will be data leaks.\n seq_len: int, sequence length.\n \"\"\"\n\n # Generate permutation indices\n index = tf.range(seq_len, dtype=tf.int64)\n index = tf.transpose(tf.reshape(index, [-1, perm_size]))\n index = tf.random_shuffle(index)\n index = tf.reshape(tf.transpose(index), [-1])\n\n # `perm_mask` and `target_mask`\n # non-functional tokens\n non_func_tokens = tf.logical_not(tf.logical_or(\n tf.equal(inputs, SEP_ID),\n tf.equal(inputs, CLS_ID)))\n\n non_mask_tokens = tf.logical_and(tf.logical_not(is_masked), non_func_tokens)\n masked_or_func_tokens = tf.logical_not(non_mask_tokens)\n\n # Set the permutation indices of non-masked (& non-funcional) tokens to the\n # smallest index (-1):\n # (1) they can be seen by all other positions\n # (2) they cannot see masked positions, so there won\"t be information leak\n smallest_index = -tf.ones([seq_len], dtype=tf.int64)\n rev_index = tf.where(non_mask_tokens, smallest_index, index)\n\n # Create `target_mask`: non-funcional and maksed tokens\n # 1: use mask as input and have loss\n # 0: use token (or [SEP], [CLS]) as input and do not have loss\n target_tokens = tf.logical_and(masked_or_func_tokens, non_func_tokens)\n target_mask = tf.cast(target_tokens, tf.float32)\n\n # Create `perm_mask`\n # `target_tokens` cannot see themselves\n self_rev_index = tf.where(target_tokens, rev_index, rev_index + 1)\n\n # 1: cannot attend if i <= j and j is not non-masked (masked_or_func_tokens)\n # 0: can attend if i > j or j is non-masked\n perm_mask = tf.logical_and(\n self_rev_index[:, None] <= rev_index[None, :],\n masked_or_func_tokens)\n perm_mask = tf.cast(perm_mask, tf.float32)\n\n # new target: [next token] for LM and [curr token] (self) for PLM\n new_targets = tf.concat([inputs[0: 1], targets[: -1]],\n axis=0)\n\n # construct inputs_k\n inputs_k = inputs\n\n # construct inputs_q\n inputs_q = target_mask\n\n return perm_mask, new_targets, target_mask, inputs_k, inputs_q","function_tokens":["def","_local_perm","(","inputs",",","targets",",","is_masked",",","perm_size",",","seq_len",")",":","# Generate permutation indices","index","=","tf",".","range","(","seq_len",",","dtype","=","tf",".","int64",")","index","=","tf",".","transpose","(","tf",".","reshape","(","index",",","[","-","1",",","perm_size","]",")",")","index","=","tf",".","random_shuffle","(","index",")","index","=","tf",".","reshape","(","tf",".","transpose","(","index",")",",","[","-","1","]",")","# `perm_mask` and `target_mask`","# non-functional 
tokens","non_func_tokens","=","tf",".","logical_not","(","tf",".","logical_or","(","tf",".","equal","(","inputs",",","SEP_ID",")",",","tf",".","equal","(","inputs",",","CLS_ID",")",")",")","non_mask_tokens","=","tf",".","logical_and","(","tf",".","logical_not","(","is_masked",")",",","non_func_tokens",")","masked_or_func_tokens","=","tf",".","logical_not","(","non_mask_tokens",")","# Set the permutation indices of non-masked (& non-funcional) tokens to the","# smallest index (-1):","# (1) they can be seen by all other positions","# (2) they cannot see masked positions, so there won\"t be information leak","smallest_index","=","-","tf",".","ones","(","[","seq_len","]",",","dtype","=","tf",".","int64",")","rev_index","=","tf",".","where","(","non_mask_tokens",",","smallest_index",",","index",")","# Create `target_mask`: non-funcional and maksed tokens","# 1: use mask as input and have loss","# 0: use token (or [SEP], [CLS]) as input and do not have loss","target_tokens","=","tf",".","logical_and","(","masked_or_func_tokens",",","non_func_tokens",")","target_mask","=","tf",".","cast","(","target_tokens",",","tf",".","float32",")","# Create `perm_mask`","# `target_tokens` cannot see themselves","self_rev_index","=","tf",".","where","(","target_tokens",",","rev_index",",","rev_index","+","1",")","# 1: cannot attend if i <= j and j is not non-masked (masked_or_func_tokens)","# 0: can attend if i > j or j is non-masked","perm_mask","=","tf",".","logical_and","(","self_rev_index","[",":",",","None","]","<=","rev_index","[","None",",",":","]",",","masked_or_func_tokens",")","perm_mask","=","tf",".","cast","(","perm_mask",",","tf",".","float32",")","# new target: [next token] for LM and [curr token] (self) for PLM","new_targets","=","tf",".","concat","(","[","inputs","[","0",":","1","]",",","targets","[",":","-","1","]","]",",","axis","=","0",")","# construct inputs_k","inputs_k","=","inputs","# construct inputs_q","inputs_q","=","target_mask","return","perm_mask",",","new_targets",",","target_mask",",","inputs_k",",","inputs_q"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/data_utils.py#L579-L643"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/model_utils.py","language":"python","identifier":"get_assignment_map_from_checkpoint","parameters":"(tvars, init_checkpoint)","argument_list":"","return_statement":"return (assignment_map, initialized_variable_names)","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n # tf.logging.info('original name: %s', name)\n if name not in name_to_variable:\n continue\n # assignment_map[name] = name\n assignment_map[name] = 
name_to_variable[name]\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)","function_tokens":["def","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","assignment_map","=","{","}","initialized_variable_names","=","{","}","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","# tf.logging.info('original name: %s', name)","if","name","not","in","name_to_variable",":","continue","# assignment_map[name] = name","assignment_map","[","name","]","=","name_to_variable","[","name","]","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","return","(","assignment_map",",","initialized_variable_names",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/model_utils.py#L266-L292"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/model_utils.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n include_in_weight_decay=[\"r_s_bias\", \"r_r_bias\", \"r_w_bias\"],\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n include_in_weight_decay=[\"r_s_bias\", \"r_r_bias\", \"r_w_bias\"],\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = 
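Editor's sketch of the name matching in `get_assignment_map_from_checkpoint` above, using plain strings in place of tf Variables (the real code iterates trainables and `tf.train.list_variables`, which yields (name, shape) pairs; everything below is illustrative).

import collections
import re

raw_names = ["model/layer_0/kernel:0", "model/layer_0/bias:0", "global_step:0"]
name_to_variable = collections.OrderedDict()
for name in raw_names:
    m = re.match("^(.*):\\d+$", name)   # strip the ":0" output suffix
    if m is not None:
        name = m.group(1)
    name_to_variable[name] = object()   # stand-in for the tf.Variable

ckpt_vars = [("model/layer_0/kernel", [3, 3]), ("optimizer/beta", [])]
assignment_map = {name: name_to_variable[name]
                  for name, _ in ckpt_vars if name in name_to_variable}
print(list(assignment_map))   # ['model/layer_0/kernel'] — only overlapping names load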
include_in_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","include_in_weight_decay","=","[","\"r_s_bias\"",",","\"r_r_bias\"",",","\"r_w_bias\"","]",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay","self",".","include_in_weight_decay","=","include_in_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/model_utils.py#L298-L316"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/model_utils.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/model_utils.py#L318-L368"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/model_utils.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n return True\n\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes {}'.format(param_name))\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","for","r","in","self",".","include_in_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","True","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","tf",".","logging",".","info","(","'Adam WD excludes {}'",".","format","(","param_name",")",")","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/model_utils.py#L370-L383"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/model_utils.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return 
param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/model_utils.py#L385-L390"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_is_iterable","parameters":"(obj)","argument_list":"","return_statement":"","docstring":"A Python 2 and 3 compatible util to check whether `obj` is iterable.","docstring_summary":"A Python 2 and 3 compatible util to check whether `obj` is iterable.","docstring_tokens":["A","Python","2","and","3","compatible","util","to","check","whether","obj","is","iterable","."],"function":"def _is_iterable(obj):\n \"\"\"A Python 2 and 3 compatible util to check whether `obj` is iterable.\"\"\"\n try:\n iter(obj)\n return True\n except TypeError:\n return False","function_tokens":["def","_is_iterable","(","obj",")",":","try",":","iter","(","obj",")","return","True","except","TypeError",":","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L114-L120"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_create_or_get_iterations_per_loop","parameters":"()","argument_list":"","return_statement":"","docstring":"Creates or gets the iterations_per_loop variable.\n\n In TPUEstimator, the user provided computation, the model_fn, is wrapped\n inside a tf.while_loop for peak performance. The iterations of the loop are\n specified by this variable, which adjusts its value on the CPU after each TPU\n program execution and before the next TPU execution.\n\n The purpose of using a variable, rather then a constant, is to allow\n TPUEstimator adapt the TPU training iterations according to the final steps\n specified by users. For example, if the user sets the iterations_per_loop as 4\n in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop\n variable will have the following value before each TPU training.\n\n - 1-th TPU execution: iterations_per_loop = 4\n - 2-th TPU execution: iterations_per_loop = 4\n - 3-th TPU execution: iterations_per_loop = 2\n\n As model_fn increases the global step once per train_op invocation, the global\n step is 10 after all TPU executions, matching the steps=10 inputs passed in by\n users.\n\n Returns:\n A TF non-trainable resource variable.\n\n Raises:\n RuntimeError: If multi iterations_per_loop variables were found.","docstring_summary":"Creates or gets the iterations_per_loop variable.","docstring_tokens":["Creates","or","gets","the","iterations_per_loop","variable","."],"function":"def _create_or_get_iterations_per_loop():\n \"\"\"Creates or gets the iterations_per_loop variable.\n\n In TPUEstimator, the user provided computation, the model_fn, is wrapped\n inside a tf.while_loop for peak performance. 
The iterations of the loop are\n specified by this variable, which adjusts its value on the CPU after each TPU\n program execution and before the next TPU execution.\n\n The purpose of using a variable, rather then a constant, is to allow\n TPUEstimator adapt the TPU training iterations according to the final steps\n specified by users. For example, if the user sets the iterations_per_loop as 4\n in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop\n variable will have the following value before each TPU training.\n\n - 1-th TPU execution: iterations_per_loop = 4\n - 2-th TPU execution: iterations_per_loop = 4\n - 3-th TPU execution: iterations_per_loop = 2\n\n As model_fn increases the global step once per train_op invocation, the global\n step is 10 after all TPU executions, matching the steps=10 inputs passed in by\n users.\n\n Returns:\n A TF non-trainable resource variable.\n\n Raises:\n RuntimeError: If multi iterations_per_loop variables were found.\n \"\"\"\n graph = ops.get_default_graph()\n collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)\n iter_vars = graph.get_collection(collection_name)\n if len(iter_vars) == 1:\n return iter_vars[0]\n elif len(iter_vars) > 1:\n raise RuntimeError('Multiple iterations_per_loop_var in collection.')\n\n with ops.colocate_with(training_util.get_global_step()):\n with variable_scope.variable_scope(\n _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):\n return variable_scope.get_variable(\n _ITERATIONS_PER_LOOP_VAR,\n initializer=init_ops.zeros_initializer(),\n shape=[],\n dtype=dtypes.int32,\n trainable=False,\n collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],\n use_resource=True)","function_tokens":["def","_create_or_get_iterations_per_loop","(",")",":","graph","=","ops",".","get_default_graph","(",")","collection_name","=","'{}_{}'",".","format","(","_TPU_ESTIMATOR",",","_ITERATIONS_PER_LOOP_VAR",")","iter_vars","=","graph",".","get_collection","(","collection_name",")","if","len","(","iter_vars",")","==","1",":","return","iter_vars","[","0","]","elif","len","(","iter_vars",")",">","1",":","raise","RuntimeError","(","'Multiple iterations_per_loop_var in collection.'",")","with","ops",".","colocate_with","(","training_util",".","get_global_step","(",")",")",":","with","variable_scope",".","variable_scope","(","_TPU_ESTIMATOR",",","reuse","=","variable_scope",".","AUTO_REUSE",")",":","return","variable_scope",".","get_variable","(","_ITERATIONS_PER_LOOP_VAR",",","initializer","=","init_ops",".","zeros_initializer","(",")",",","shape","=","[","]",",","dtype","=","dtypes",".","int32",",","trainable","=","False",",","collections","=","[","collection_name",",","ops",".","GraphKeys",".","LOCAL_VARIABLES","]",",","use_resource","=","True",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L139-L185"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_sync_variables_ops","parameters":"(ctx)","argument_list":"","return_statement":"","docstring":"Create varriables synchronization ops.\n\n Gets the variables back from TPU nodes. 
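Editor's sketch: the 4/4/2 schedule in the docstring above, computed directly (an illustrative helper, not part of the repo).

def loop_iterations(total_steps, iterations_per_loop):
    remaining = total_steps
    while remaining > 0:
        step = min(iterations_per_loop, remaining)
        yield step
        remaining -= step

print(list(loop_iterations(10, 4)))   # [4, 4, 2]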
This means the variables updated\n by TPU will now be *synced* to host memory.\n In BROADCAST mode, we skip this sync since the variables are ususally too\n big to transmit via RPC.\n\n Args:\n ctx: A `_InternalTPUContext` instance with mode.\n\n Returns:\n A list of sync ops.","docstring_summary":"Create varriables synchronization ops.","docstring_tokens":["Create","varriables","synchronization","ops","."],"function":"def _sync_variables_ops(ctx):\n \"\"\"Create varriables synchronization ops.\n\n Gets the variables back from TPU nodes. This means the variables updated\n by TPU will now be *synced* to host memory.\n In BROADCAST mode, we skip this sync since the variables are ususally too\n big to transmit via RPC.\n\n Args:\n ctx: A `_InternalTPUContext` instance with mode.\n\n Returns:\n A list of sync ops.\n \"\"\"\n\n if not ctx.is_input_broadcast_with_iterators():\n return [\n array_ops.check_numerics(v.read_value(),\n 'Gradient for %s is NaN' % v.name).op\n for v in variables.trainable_variables()\n ]\n else:\n return [control_flow_ops.no_op()]","function_tokens":["def","_sync_variables_ops","(","ctx",")",":","if","not","ctx",".","is_input_broadcast_with_iterators","(",")",":","return","[","array_ops",".","check_numerics","(","v",".","read_value","(",")",",","'Gradient for %s is NaN'","%","v",".","name",")",".","op","for","v","in","variables",".","trainable_variables","(",")","]","else",":","return","[","control_flow_ops",".","no_op","(",")","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L188-L210"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_increase_eval_step_op","parameters":"(iterations_per_loop)","argument_list":"","return_statement":"return state_ops.assign_add(\n eval_step,\n math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),\n use_locking=True)","docstring":"Returns an op to increase the eval step for TPU evaluation.\n\n Args:\n iterations_per_loop: Tensor. The number of eval steps running in TPU system\n before returning to CPU host for each `Session.run`.\n\n Returns:\n An operation","docstring_summary":"Returns an op to increase the eval step for TPU evaluation.","docstring_tokens":["Returns","an","op","to","increase","the","eval","step","for","TPU","evaluation","."],"function":"def _increase_eval_step_op(iterations_per_loop):\n \"\"\"Returns an op to increase the eval step for TPU evaluation.\n\n Args:\n iterations_per_loop: Tensor. The number of eval steps running in TPU system\n before returning to CPU host for each `Session.run`.\n\n Returns:\n An operation\n \"\"\"\n eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access\n # Estimator evaluate increases 1 by default. So, we increase the difference.\n return state_ops.assign_add(\n eval_step,\n math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),\n use_locking=True)","function_tokens":["def","_increase_eval_step_op","(","iterations_per_loop",")",":","eval_step","=","evaluation",".","_get_or_create_eval_step","(",")","# pylint: disable=protected-access","# Estimator evaluate increases 1 by default. 
So, we increase the difference.","return","state_ops",".","assign_add","(","eval_step",",","math_ops",".","cast","(","iterations_per_loop","-","1",",","dtype","=","eval_step",".","dtype",")",",","use_locking","=","True",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L213-L228"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"generate_per_core_enqueue_ops_fn_for_host","parameters":"(\n ctx, input_fn, inputs_structure_recorder, host_device, host_id)","argument_list":"","return_statement":"return enqueue_ops_fn, captured_infeed_queue","docstring":"Generates infeed enqueue ops for per-core input_fn on a single host.","docstring_summary":"Generates infeed enqueue ops for per-core input_fn on a single host.","docstring_tokens":["Generates","infeed","enqueue","ops","for","per","-","core","input_fn","on","a","single","host","."],"function":"def generate_per_core_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, host_device, host_id):\n \"\"\"Generates infeed enqueue ops for per-core input_fn on a single host.\"\"\"\n captured_infeed_queue = _CapturedObject()\n tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)\n\n def enqueue_ops_fn():\n \"\"\"A fn returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n user_context = tpu_context.TPUContext(\n internal_ctx=ctx,\n input_device=host_device,\n invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)\n inputs = _Inputs.from_input_fn(input_fn(user_context))\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. 
Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue","function_tokens":["def","generate_per_core_enqueue_ops_fn_for_host","(","ctx",",","input_fn",",","inputs_structure_recorder",",","host_device",",","host_id",")",":","captured_infeed_queue","=","_CapturedObject","(",")","tpu_ordinal_function_impl","=","ctx",".","tpu_ordinal_function","(","host_id",")","def","enqueue_ops_fn","(",")",":","\"\"\"A fn returns enqueue_ops.\"\"\"","num_cores_per_host","=","ctx",".","num_of_cores_per_host","per_host_sharded_inputs","=","[","]","for","core_ordinal","in","range","(","num_cores_per_host",")",":","with","ops",".","name_scope","(","'ordinal_%d'","%","(","core_ordinal",")",")",":","user_context","=","tpu_context",".","TPUContext","(","internal_ctx","=","ctx",",","input_device","=","host_device",",","invocation_index","=","host_id","*","ctx",".","num_of_cores_per_host","+","core_ordinal",")","inputs","=","_Inputs",".","from_input_fn","(","input_fn","(","user_context",")",")","if","inputs",".","is_dataset",":","raise","TypeError","(","'`input_fn` returning `Dataset` is not yet supported in '","'per-Core input pipeline deployment yet. 
Please set '","'TPUConfig.per_host_input_for_training to True or return '","'`features` and `labels` from `input_fn`'",")","features",",","labels","=","inputs",".","features_and_labels","(",")","inputs_structure_recorder",".","validate_and_record_structure","(","features",",","labels",")","flattened_inputs","=","(","inputs_structure_recorder",".","flatten_features_and_labels","(","features",",","labels",")",")","per_host_sharded_inputs",".","append","(","flattened_inputs",")","infeed_queue","=","tpu_feed",".","InfeedQueue","(","number_of_tuple_elements","=","len","(","per_host_sharded_inputs","[","0","]",")",")","captured_infeed_queue",".","capture","(","infeed_queue",")","per_host_enqueue_ops","=","infeed_queue",".","generate_enqueue_ops","(","per_host_sharded_inputs",",","tpu_ordinal_function","=","tpu_ordinal_function_impl",")","return","per_host_enqueue_ops","return","enqueue_ops_fn",",","captured_infeed_queue"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L703-L743"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"generate_per_host_enqueue_ops_fn_for_host","parameters":"(\n ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id)","argument_list":"","return_statement":"return enqueue_ops_fn, captured_infeed_queue, dataset_initializer","docstring":"Generates infeed enqueue ops for per-host input_fn on a single host.","docstring_summary":"Generates infeed enqueue ops for per-host input_fn on a single host.","docstring_tokens":["Generates","infeed","enqueue","ops","for","per","-","host","input_fn","on","a","single","host","."],"function":"def generate_per_host_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):\n \"\"\"Generates infeed enqueue ops for per-host input_fn on a single host.\"\"\"\n captured_infeed_queue = _CapturedObject()\n\n dataset_initializer = None\n\n with ops.device(device):\n user_context = tpu_context.TPUContext(\n internal_ctx=ctx, input_device=device, invocation_index=host_id)\n inputs = _Inputs.from_input_fn(input_fn(user_context))\n\n is_dataset = inputs.is_dataset\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n if not is_dataset:\n raise TypeError(\n 'For mode PREDICT, `input_fn` must return `Dataset` instead of '\n '`features` and `labels`.')\n if batch_axis is not None:\n raise TypeError('For mode PREDICT, batch_axis is not supported yet.')\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset,\n batch_size=ctx.batch_size_for_input_fn,\n add_padding=True)\n\n if is_dataset:\n dataset_initializer = inputs.dataset_initializer()\n\n tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)\n\n def enqueue_ops_fn():\n \"\"\"A Fn returning the TPU infeed enqueue ops.\n\n By providing as a Fn, it can be invoked inside the tf.while_loop such that\n the input pipeline for multiple iterations can be executed by one\n Session.run call.\n\n Returns:\n list of dict of ops.\n \"\"\"\n with ops.device(device):\n num_of_replicas_per_host = ctx.num_of_replicas_per_host\n # Convert user input to features and labels. 
If the user returns a\n # dataset, it is initialized and the features and labels extracted via\n # `dataset.iterator.get_next()`\n features, labels = inputs.features_and_labels()\n signals = inputs.signals()\n\n inputs_structure_recorder.validate_and_record_structure(features, labels)\n unsharded_tensor_list = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n\n infeed_queue = tpu_feed.InfeedQueue(\n tuple_types=[t.dtype for t in unsharded_tensor_list],\n tuple_shapes=[t.shape for t in unsharded_tensor_list],\n shard_dimensions=batch_axis)\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_number_of_shards(num_of_replicas_per_host)\n per_host_enqueue_ops = (\n infeed_queue.split_inputs_and_generate_enqueue_ops(\n unsharded_tensor_list,\n placement_function=lambda x: device,\n tpu_ordinal_function=tpu_ordinal_function_impl))\n if signals is None:\n return per_host_enqueue_ops\n else:\n return {\n 'ops': per_host_enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, dataset_initializer","function_tokens":["def","generate_per_host_enqueue_ops_fn_for_host","(","ctx",",","input_fn",",","inputs_structure_recorder",",","batch_axis",",","device",",","host_id",")",":","captured_infeed_queue","=","_CapturedObject","(",")","dataset_initializer","=","None","with","ops",".","device","(","device",")",":","user_context","=","tpu_context",".","TPUContext","(","internal_ctx","=","ctx",",","input_device","=","device",",","invocation_index","=","host_id",")","inputs","=","_Inputs",".","from_input_fn","(","input_fn","(","user_context",")",")","is_dataset","=","inputs",".","is_dataset","if","ctx",".","mode","==","model_fn_lib",".","ModeKeys",".","PREDICT",":","if","not","is_dataset",":","raise","TypeError","(","'For mode PREDICT, `input_fn` must return `Dataset` instead of '","'`features` and `labels`.'",")","if","batch_axis","is","not","None",":","raise","TypeError","(","'For mode PREDICT, batch_axis is not supported yet.'",")","inputs","=","_InputsWithStoppingSignals","(","dataset","=","inputs",".","dataset",",","batch_size","=","ctx",".","batch_size_for_input_fn",",","add_padding","=","True",")","if","is_dataset",":","dataset_initializer","=","inputs",".","dataset_initializer","(",")","tpu_ordinal_function_impl","=","ctx",".","tpu_ordinal_function","(","host_id",")","def","enqueue_ops_fn","(",")",":","\"\"\"A Fn returning the TPU infeed enqueue ops.\n\n By providing as a Fn, it can be invoked inside the tf.while_loop such that\n the input pipeline for multiple iterations can be executed by one\n Session.run call.\n\n Returns:\n list of dict of ops.\n \"\"\"","with","ops",".","device","(","device",")",":","num_of_replicas_per_host","=","ctx",".","num_of_replicas_per_host","# Convert user input to features and labels. 
If the user returns a","# dataset, it is initialized and the features and labels extracted via","# `dataset.iterator.get_next()`","features",",","labels","=","inputs",".","features_and_labels","(",")","signals","=","inputs",".","signals","(",")","inputs_structure_recorder",".","validate_and_record_structure","(","features",",","labels",")","unsharded_tensor_list","=","(","inputs_structure_recorder",".","flatten_features_and_labels","(","features",",","labels",",","signals",")",")","infeed_queue","=","tpu_feed",".","InfeedQueue","(","tuple_types","=","[","t",".","dtype","for","t","in","unsharded_tensor_list","]",",","tuple_shapes","=","[","t",".","shape","for","t","in","unsharded_tensor_list","]",",","shard_dimensions","=","batch_axis",")","captured_infeed_queue",".","capture","(","infeed_queue",")","infeed_queue",".","set_number_of_shards","(","num_of_replicas_per_host",")","per_host_enqueue_ops","=","(","infeed_queue",".","split_inputs_and_generate_enqueue_ops","(","unsharded_tensor_list",",","placement_function","=","lambda","x",":","device",",","tpu_ordinal_function","=","tpu_ordinal_function_impl",")",")","if","signals","is","None",":","return","per_host_enqueue_ops","else",":","return","{","'ops'",":","per_host_enqueue_ops",",","'signals'",":","signals",",","}","return","enqueue_ops_fn",",","captured_infeed_queue",",","dataset_initializer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L746-L818"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"generate_per_host_v2_enqueue_ops_fn_for_host","parameters":"(\n ctx, input_fn, inputs_structure_recorder, device, host_id)","argument_list":"","return_statement":"return enqueue_ops_fn, captured_infeed_queue, dataset_initializer","docstring":"Generates infeed enqueue ops for per-host input_fn on a single host.","docstring_summary":"Generates infeed enqueue ops for per-host input_fn on a single host.","docstring_tokens":["Generates","infeed","enqueue","ops","for","per","-","host","input_fn","on","a","single","host","."],"function":"def generate_per_host_v2_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, device, host_id):\n \"\"\"Generates infeed enqueue ops for per-host input_fn on a single host.\"\"\"\n captured_infeed_queue = _CapturedObject()\n dataset_initializer = None\n\n with ops.device(device):\n user_context = tpu_context.TPUContext(\n internal_ctx=ctx, input_device=device, invocation_index=host_id)\n inputs = _Inputs.from_input_fn(input_fn(user_context))\n\n is_dataset = inputs.is_dataset\n if not is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '\n 'input pipeline configuration.')\n\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset,\n batch_size=ctx.batch_size_for_input_fn,\n add_padding=True,\n num_invocations_per_step=ctx.num_of_replicas_per_host)\n\n dataset_initializer = inputs.dataset_initializer()\n tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)\n\n def enqueue_ops_fn():\n \"\"\"Generates the per_host enqueue ops.\"\"\"\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n cached_signals = None\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in 
range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n signals = inputs.signals()\n\n # All the replicas share the replica 0's stopping signal.\n # This avoids inconsistent state among different model replicas.\n if cached_signals:\n signals['stopping'] = cached_signals['stopping']\n else:\n cached_signals = signals\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n if inputs_structure_recorder.flattened_input_dims:\n input_partition_dims = inputs_structure_recorder.flattened_input_dims\n if signals:\n input_partition_dims += [None] * len(signals)\n # pylint: disable=protected-access\n infeed_queue = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]),\n host_id=host_id,\n input_partition_dims=input_partition_dims,\n device_assignment=ctx.device_assignment)\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs)\n else:\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs,\n tpu_ordinal_function=tpu_ordinal_function_impl)\n captured_infeed_queue.capture(infeed_queue)\n\n if signals is None:\n return per_host_enqueue_ops\n else:\n return {\n 'ops': per_host_enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, dataset_initializer","function_tokens":["def","generate_per_host_v2_enqueue_ops_fn_for_host","(","ctx",",","input_fn",",","inputs_structure_recorder",",","device",",","host_id",")",":","captured_infeed_queue","=","_CapturedObject","(",")","dataset_initializer","=","None","with","ops",".","device","(","device",")",":","user_context","=","tpu_context",".","TPUContext","(","internal_ctx","=","ctx",",","input_device","=","device",",","invocation_index","=","host_id",")","inputs","=","_Inputs",".","from_input_fn","(","input_fn","(","user_context",")",")","is_dataset","=","inputs",".","is_dataset","if","not","is_dataset",":","raise","TypeError","(","'`input_fn` must return a `Dataset` for the PER_HOST_V2 '","'input pipeline configuration.'",")","if","ctx",".","mode","==","model_fn_lib",".","ModeKeys",".","PREDICT",":","inputs","=","_InputsWithStoppingSignals","(","dataset","=","inputs",".","dataset",",","batch_size","=","ctx",".","batch_size_for_input_fn",",","add_padding","=","True",",","num_invocations_per_step","=","ctx",".","num_of_replicas_per_host",")","dataset_initializer","=","inputs",".","dataset_initializer","(",")","tpu_ordinal_function_impl","=","ctx",".","tpu_ordinal_function","(","host_id",")","def","enqueue_ops_fn","(",")",":","\"\"\"Generates the per_host enqueue ops.\"\"\"","control_deps","=","[","]","per_host_sharded_inputs","=","[","]","num_replicas_per_host","=","ctx",".","num_of_replicas_per_host","cached_signals","=","None","with","ops",".","device","(","device",")",":","if","not","inputs",".","is_dataset",":","raise","TypeError","(","'`input_fn` must return a `Dataset` for this mode.'",")","for","_","in","range","(","num_replicas_per_host",")",":","# Use control dependencies to ensure a deterministic ordering.","with","ops",".","control_dependencies","(","control_deps",")",":","features",",","labels","=","inputs",".","features_and_labels","(",")","# Calls get_next()","signals","=","inputs",".","signals","(",")","# All the replicas share the replica 0's stopping signal.","# This avoids inconsistent state among different model replicas.","if","cached_signals",":","signals","[","'stopping'","]","=","cached_signals","[","'stopping'","]","else",":","cached_signals","=","signals","inputs_structure_recorder",".","validate_and_record_structure","(","features",",","labels",")","flattened_inputs","=","(","inputs_structure_recorder",".","flatten_features_and_labels","(","features",",","labels",",","signals",")",")","control_deps",".","extend","(","flattened_inputs",")","per_host_sharded_inputs",".","append","(","flattened_inputs",")","if","inputs_structure_recorder",".","flattened_input_dims",":","input_partition_dims","=","inputs_structure_recorder",".","flattened_input_dims","if","signals",":","input_partition_dims","+=","[","None","]","*","len","(","signals",")","# pylint: disable=protected-access","infeed_queue","=","tpu_feed",".","_PartitionedInfeedQueue","(","number_of_tuple_elements","=","len","(","per_host_sharded_inputs","[","0","]",")",",","host_id","=","host_id",",","input_partition_dims","=","input_partition_dims",",","device_assignment","=","ctx",".","device_assignment",")","per_host_enqueue_ops","=","infeed_queue",".","generate_enqueue_ops","(","per_host_sharded_inputs",")","else",":","infeed_queue","=","tpu_feed",".","InfeedQueue","(","number_of_tuple_elements","=","len","(","per_host_sharded_inputs","[","0","]",")",")","per_host_enqueue_ops","=","infeed_queue",".","generate_enqueue_ops","(","per_host_sharded_inputs",",","tpu_ordinal_function","=","tpu_ordinal_function_impl",")","captured_infeed_queue",".","capture","(","infeed_queue",")","if","signals","is","None",":","return","per_host_enqueue_ops","else",":","return","{","'ops'",":","per_host_enqueue_ops",",","'signals'",":","signals",",","}","return","enqueue_ops_fn",",","captured_infeed_queue",",","dataset_initializer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L821-L905"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"generate_broadcast_enqueue_ops_fn","parameters":"(ctx, input_fn, inputs_structure_recorder,\n num_hosts)","argument_list":"","return_statement":"return enqueue_ops_fn, captured_infeed_queue, dataset_initializer","docstring":"Generates infeed enqueue ops for one input_fn on all the hosts.","docstring_summary":"Generates infeed enqueue ops for one input_fn on all the hosts.","docstring_tokens":["Generates","infeed","enqueue","ops","for","one","input_fn","on","all","the","hosts","."],"function":"def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,\n num_hosts):\n \"\"\"Generates infeed enqueue ops for one input_fn on all the hosts.\"\"\"\n captured_infeed_queue = _CapturedObject()\n dataset_initializer = None\n device_0 = ctx.tpu_host_placement_function(host_id=0)\n with ops.device(device_0):\n user_context = tpu_context.TPUContext(\n internal_ctx=ctx, input_device=device_0, invocation_index=0)\n inputs = _Inputs.from_input_fn(input_fn(user_context))\n\n is_dataset = inputs.is_dataset\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n if not is_dataset:\n raise TypeError(\n 
'For mode PREDICT, `input_fn` must return `Dataset` instead of '\n '`features` and `labels`.')\n\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset,\n batch_size=ctx.batch_size_for_input_fn,\n add_padding=True)\n\n if is_dataset:\n dataset_initializer = inputs.dataset_initializer()\n num_replicas_per_host = ctx.num_of_replicas_per_host\n\n def tpu_ordinal_function_impl(replica_id):\n if ctx.device_assignment:\n return ctx.device_assignment.tpu_ordinal(replica=replica_id)\n else:\n return replica_id % num_replicas_per_host\n\n def device_function_impl(replica_id):\n return ctx.tpu_host_placement_function(replica_id=replica_id)\n\n def enqueue_ops_fn():\n \"\"\"Generates enqueue ops for all the hosts.\"\"\"\n broadcasted_inputs = []\n flattened_inputs = None # Cache result from input_fn.\n signals = None\n for host_id in xrange(num_hosts):\n with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):\n for _ in xrange(ctx.num_of_replicas_per_host):\n # Note: input_fn is only called once at host 0 for the first replica.\n # The features and labels returned from that invocation are\n # broadcasted to other replicas(including the replicas on other\n # hosts).\n if flattened_inputs is None:\n features, labels = inputs.features_and_labels() # Calls get_next()\n signals = inputs.signals()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n broadcasted_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(broadcasted_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n enqueue_ops = infeed_queue.generate_enqueue_ops(\n broadcasted_inputs,\n tpu_ordinal_function=tpu_ordinal_function_impl,\n placement_function=device_function_impl)\n\n if signals is None:\n return enqueue_ops\n else:\n return {\n 'ops': enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, dataset_initializer","function_tokens":["def","generate_broadcast_enqueue_ops_fn","(","ctx",",","input_fn",",","inputs_structure_recorder",",","num_hosts",")",":","captured_infeed_queue","=","_CapturedObject","(",")","dataset_initializer","=","None","device_0","=","ctx",".","tpu_host_placement_function","(","host_id","=","0",")","with","ops",".","device","(","device_0",")",":","user_context","=","tpu_context",".","TPUContext","(","internal_ctx","=","ctx",",","input_device","=","device_0",",","invocation_index","=","0",")","inputs","=","_Inputs",".","from_input_fn","(","input_fn","(","user_context",")",")","is_dataset","=","inputs",".","is_dataset","if","ctx",".","mode","==","model_fn_lib",".","ModeKeys",".","PREDICT",":","if","not","is_dataset",":","raise","TypeError","(","'For mode PREDICT, `input_fn` must return `Dataset` instead of '","'`features` and 
`labels`.'",")","inputs","=","_InputsWithStoppingSignals","(","dataset","=","inputs",".","dataset",",","batch_size","=","ctx",".","batch_size_for_input_fn",",","add_padding","=","True",")","if","is_dataset",":","dataset_initializer","=","inputs",".","dataset_initializer","(",")","num_replicas_per_host","=","ctx",".","num_of_replicas_per_host","def","tpu_ordinal_function_impl","(","replica_id",")",":","if","ctx",".","device_assignment",":","return","ctx",".","device_assignment",".","tpu_ordinal","(","replica","=","replica_id",")","else",":","return","replica_id","%","num_replicas_per_host","def","device_function_impl","(","replica_id",")",":","return","ctx",".","tpu_host_placement_function","(","replica_id","=","replica_id",")","def","enqueue_ops_fn","(",")",":","\"\"\"Generates enqueue ops for all the hosts.\"\"\"","broadcasted_inputs","=","[","]","flattened_inputs","=","None","# Cache result from input_fn.","signals","=","None","for","host_id","in","xrange","(","num_hosts",")",":","with","ops",".","device","(","ctx",".","tpu_host_placement_function","(","host_id","=","host_id",")",")",":","for","_","in","xrange","(","ctx",".","num_of_replicas_per_host",")",":","# Note: input_fn is only called once at host 0 for the first replica.","# The features and labels returned from that invocation are","# broadcasted to other replicas(including the replicas on other","# hosts).","if","flattened_inputs","is","None",":","features",",","labels","=","inputs",".","features_and_labels","(",")","# Calls get_next()","signals","=","inputs",".","signals","(",")","inputs_structure_recorder",".","validate_and_record_structure","(","features",",","labels",")","flattened_inputs","=","(","inputs_structure_recorder",".","flatten_features_and_labels","(","features",",","labels",",","signals",")",")","broadcasted_inputs",".","append","(","flattened_inputs",")","infeed_queue","=","tpu_feed",".","InfeedQueue","(","number_of_tuple_elements","=","len","(","broadcasted_inputs","[","0","]",")",")","captured_infeed_queue",".","capture","(","infeed_queue",")","enqueue_ops","=","infeed_queue",".","generate_enqueue_ops","(","broadcasted_inputs",",","tpu_ordinal_function","=","tpu_ordinal_function_impl",",","placement_function","=","device_function_impl",")","if","signals","is","None",":","return","enqueue_ops","else",":","return","{","'ops'",":","enqueue_ops",",","'signals'",":","signals",",","}","return","enqueue_ops_fn",",","captured_infeed_queue",",","dataset_initializer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L908-L983"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_export_output_to_tensors","parameters":"(export_output)","argument_list":"","return_statement":"","docstring":"Get a list of `Tensors` used in `export_output`.\n\n Args:\n export_output: an `ExportOutput` object such as `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n\n Returns:\n a list of tensors used in export_output.\n\n Raises:\n ValueError: if `export_output` is not one of `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.","docstring_summary":"Get a list of `Tensors` used in `export_output`.","docstring_tokens":["Get","a","list","of","Tensors","used","in","export_output","."],"function":"def _export_output_to_tensors(export_output):\n \"\"\"Get a list of `Tensors` used in `export_output`.\n\n Args:\n 
export_output: an `ExportOutput` object such as `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n\n Returns:\n a list of tensors used in export_output.\n\n Raises:\n ValueError: if `export_output` is not one of `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n \"\"\"\n if isinstance(export_output, export_output_lib.ClassificationOutput):\n return [export_output.scores, export_output.classes]\n elif isinstance(export_output, export_output_lib.RegressionOutput):\n return [export_output.value]\n elif isinstance(export_output, export_output_lib.PredictOutput):\n return list(export_output.outputs.values())\n else:\n raise ValueError(\n '`export_output` must have type `ClassificationOutput`, '\n '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))","function_tokens":["def","_export_output_to_tensors","(","export_output",")",":","if","isinstance","(","export_output",",","export_output_lib",".","ClassificationOutput",")",":","return","[","export_output",".","scores",",","export_output",".","classes","]","elif","isinstance","(","export_output",",","export_output_lib",".","RegressionOutput",")",":","return","[","export_output",".","value","]","elif","isinstance","(","export_output",",","export_output_lib",".","PredictOutput",")",":","return","list","(","export_output",".","outputs",".","values","(",")",")","else",":","raise","ValueError","(","'`export_output` must have type `ClassificationOutput`, '","'`RegressionOutput`, or `PredictOutput`; got {}.'",".","format","(","export_output",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2816-L2839"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_clone_export_output_with_tensors","parameters":"(export_output, tensors)","argument_list":"","return_statement":"","docstring":"Clones `export_output` but with new `tensors`.\n\n Args:\n export_output: an `ExportOutput` object such as `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n tensors: a list of `Tensors` used to construct a new `export_output`.\n\n Returns:\n A dict similar to `export_output` but with `tensors`.\n\n Raises:\n ValueError: if `export_output` is not one of `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.","docstring_summary":"Clones `export_output` but with new `tensors`.","docstring_tokens":["Clones","export_output","but","with","new","tensors","."],"function":"def _clone_export_output_with_tensors(export_output, tensors):\n \"\"\"Clones `export_output` but with new `tensors`.\n\n Args:\n export_output: an `ExportOutput` object such as `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n tensors: a list of `Tensors` used to construct a new `export_output`.\n\n Returns:\n A dict similar to `export_output` but with `tensors`.\n\n Raises:\n ValueError: if `export_output` is not one of `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n \"\"\"\n if isinstance(export_output, export_output_lib.ClassificationOutput):\n if len(tensors) != 2:\n raise ValueError('tensors must be of length 2; '\n 'got {}.'.format(len(tensors)))\n return export_output_lib.ClassificationOutput(*tensors)\n elif isinstance(export_output, export_output_lib.RegressionOutput):\n if len(tensors) != 1:\n raise ValueError('tensors must be of length 1; '\n 'got {}'.format(len(tensors)))\n return export_output_lib.RegressionOutput(*tensors)\n elif isinstance(export_output, export_output_lib.PredictOutput):\n return export_output_lib.PredictOutput(\n dict(zip(export_output.outputs.keys(), tensors)))\n else:\n raise ValueError(\n '`export_output` must have type `ClassificationOutput`, '\n '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))","function_tokens":["def","_clone_export_output_with_tensors","(","export_output",",","tensors",")",":","if","isinstance","(","export_output",",","export_output_lib",".","ClassificationOutput",")",":","if","len","(","tensors",")","!=","2",":","raise","ValueError","(","'tensors must be of length 2; '","'got {}.'",".","format","(","len","(","tensors",")",")",")","return","export_output_lib",".","ClassificationOutput","(","*","tensors",")","elif","isinstance","(","export_output",",","export_output_lib",".","RegressionOutput",")",":","if","len","(","tensors",")","!=","1",":","raise","ValueError","(","'tensors must be of length 1; '","'got {}'",".","format","(","len","(","tensors",")",")",")","return","export_output_lib",".","RegressionOutput","(","*","tensors",")","elif","isinstance","(","export_output",",","export_output_lib",".","PredictOutput",")",":","return","export_output_lib",".","PredictOutput","(","dict","(","zip","(","export_output",".","outputs",".","keys","(",")",",","tensors",")",")",")","else",":","raise","ValueError","(","'`export_output` must have type `ClassificationOutput`, '","'`RegressionOutput`, or `PredictOutput`; got {}.'",".","format","(","export_output",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2842-L2873"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_eval_on_tpu_system","parameters":"(ctx, model_fn_wrapper, dequeue_fn)","argument_list":"","return_statement":"return compile_op, loss, host_calls, scaffold, captured_eval_hooks.get()","docstring":"Executes `model_fn_wrapper` multiple times on all TPU shards.","docstring_summary":"Executes `model_fn_wrapper` multiple times on all TPU shards.","docstring_tokens":["Executes","model_fn_wrapper","multiple","times","on","all","TPU","shards","."],"function":"def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n \"\"\"Executes `model_fn_wrapper` multiple times on all TPU shards.\"\"\"\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n (single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks\n ) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)\n\n def multi_tpu_eval_steps_on_single_shard():\n loop_vars = [_ZERO_LOSS]\n if model_fn_wrapper._eval_cache_fn is not None:\n batch_size = ctx.global_batch_size\n num_shards = ctx._config._tpu_config.num_shards\n loop_vars += model_fn_wrapper._eval_cache_fn(batch_size \/\/ num_shards)\n\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_eval_step,\n loop_vars)\n\n compile_op, ret = tpu.split_compile_and_shard(\n multi_tpu_eval_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n loss = ret[0]\n scaffold = _get_scaffold(captured_scaffold_fn)\n return compile_op, loss, host_calls, scaffold, 
captured_eval_hooks.get()","function_tokens":["def","_eval_on_tpu_system","(","ctx",",","model_fn_wrapper",",","dequeue_fn",")",":","iterations_per_loop_var","=","_create_or_get_iterations_per_loop","(",")","(","single_tpu_eval_step",",","host_calls",",","captured_scaffold_fn",",","captured_eval_hooks",")","=","model_fn_wrapper",".","convert_to_single_tpu_eval_step","(","dequeue_fn",")","def","multi_tpu_eval_steps_on_single_shard","(",")",":","loop_vars","=","[","_ZERO_LOSS","]","if","model_fn_wrapper",".","_eval_cache_fn","is","not","None",":","batch_size","=","ctx",".","global_batch_size","num_shards","=","ctx",".","_config",".","_tpu_config",".","num_shards","loop_vars","+=","model_fn_wrapper",".","_eval_cache_fn","(","batch_size","\/\/","num_shards",")","return","training_loop",".","repeat","(","iterations_per_loop_var",",","single_tpu_eval_step",",","loop_vars",")","compile_op",",","ret","=","tpu",".","split_compile_and_shard","(","multi_tpu_eval_steps_on_single_shard",",","inputs","=","[","]",",","num_shards","=","ctx",".","num_replicas",",","outputs_from_all_shards","=","False",",","device_assignment","=","ctx",".","device_assignment",")","loss","=","ret","[","0","]","scaffold","=","_get_scaffold","(","captured_scaffold_fn",")","return","compile_op",",","loss",",","host_calls",",","scaffold",",","captured_eval_hooks",".","get","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2876-L2904"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_train_on_tpu_system","parameters":"(ctx, model_fn_wrapper, dequeue_fn)","argument_list":"","return_statement":"return compile_op, loss, host_call, scaffold, captured_training_hooks.get()","docstring":"Executes `model_fn_wrapper` multiple times on all TPU shards.","docstring_summary":"Executes `model_fn_wrapper` multiple times on all TPU shards.","docstring_tokens":["Executes","model_fn_wrapper","multiple","times","on","all","TPU","shards","."],"function":"def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n \"\"\"Executes `model_fn_wrapper` multiple times on all TPU shards.\"\"\"\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n (single_tpu_train_step, host_call, captured_scaffold_fn,\n captured_training_hooks) = (\n model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))\n\n def multi_tpu_train_steps_on_single_shard():\n loop_vars = [_INITIAL_LOSS]\n if model_fn_wrapper._train_cache_fn is not None:\n batch_size = ctx.global_batch_size\n num_shards = ctx._config._tpu_config.num_shards\n loop_vars += model_fn_wrapper._train_cache_fn(batch_size \/\/ num_shards)\n\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_train_step,\n loop_vars)\n\n compile_op, ret = tpu.split_compile_and_shard(\n multi_tpu_train_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n loss = ret[0]\n scaffold = _get_scaffold(captured_scaffold_fn)\n return compile_op, loss, host_call, scaffold, 
captured_training_hooks.get()","function_tokens":["def","_train_on_tpu_system","(","ctx",",","model_fn_wrapper",",","dequeue_fn",")",":","iterations_per_loop_var","=","_create_or_get_iterations_per_loop","(",")","(","single_tpu_train_step",",","host_call",",","captured_scaffold_fn",",","captured_training_hooks",")","=","(","model_fn_wrapper",".","convert_to_single_tpu_train_step","(","dequeue_fn",")",")","def","multi_tpu_train_steps_on_single_shard","(",")",":","loop_vars","=","[","_INITIAL_LOSS","]","if","model_fn_wrapper",".","_train_cache_fn","is","not","None",":","batch_size","=","ctx",".","global_batch_size","num_shards","=","ctx",".","_config",".","_tpu_config",".","num_shards","loop_vars","+=","model_fn_wrapper",".","_train_cache_fn","(","batch_size","\/\/","num_shards",")","return","training_loop",".","repeat","(","iterations_per_loop_var",",","single_tpu_train_step",",","loop_vars",")","compile_op",",","ret","=","tpu",".","split_compile_and_shard","(","multi_tpu_train_steps_on_single_shard",",","inputs","=","[","]",",","num_shards","=","ctx",".","num_replicas",",","outputs_from_all_shards","=","False",",","device_assignment","=","ctx",".","device_assignment",")","loss","=","ret","[","0","]","scaffold","=","_get_scaffold","(","captured_scaffold_fn",")","return","compile_op",",","loss",",","host_call",",","scaffold",",","captured_training_hooks",".","get","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2907-L2936"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_predict_on_tpu_system","parameters":"(ctx, model_fn_wrapper, dequeue_fn)","argument_list":"","return_statement":"return (compile_op, dummy_predict_op, host_calls, scaffold,\n captured_predict_hooks.get())","docstring":"Executes `model_fn_wrapper` multiple times on all TPU shards.","docstring_summary":"Executes `model_fn_wrapper` multiple times on all TPU shards.","docstring_tokens":["Executes","model_fn_wrapper","multiple","times","on","all","TPU","shards","."],"function":"def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n \"\"\"Executes `model_fn_wrapper` multiple times on all TPU shards.\"\"\"\n (single_tpu_predict_step, host_calls, captured_scaffold_fn,\n captured_predict_hooks\n ) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)\n\n def multi_tpu_predict_steps_on_single_shard():\n\n def cond(scalar_stopping_signal):\n return math_ops.logical_not(\n _StopSignals.should_stop(scalar_stopping_signal))\n\n inputs = [_StopSignals.NON_STOPPING_SIGNAL]\n outputs = training_loop.while_loop(\n cond, single_tpu_predict_step, inputs=inputs, name=b'loop')\n return outputs\n\n (compile_op, dummy_predict_op,) = tpu.split_compile_and_shard(\n multi_tpu_predict_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n dummy_predict_op = dummy_predict_op[0]\n scaffold = _get_scaffold(captured_scaffold_fn)\n return (compile_op, dummy_predict_op, host_calls, scaffold,\n 
captured_predict_hooks.get())","function_tokens":["def","_predict_on_tpu_system","(","ctx",",","model_fn_wrapper",",","dequeue_fn",")",":","(","single_tpu_predict_step",",","host_calls",",","captured_scaffold_fn",",","captured_predict_hooks",")","=","model_fn_wrapper",".","convert_to_single_tpu_predict_step","(","dequeue_fn",")","def","multi_tpu_predict_steps_on_single_shard","(",")",":","def","cond","(","scalar_stopping_signal",")",":","return","math_ops",".","logical_not","(","_StopSignals",".","should_stop","(","scalar_stopping_signal",")",")","inputs","=","[","_StopSignals",".","NON_STOPPING_SIGNAL","]","outputs","=","training_loop",".","while_loop","(","cond",",","single_tpu_predict_step",",","inputs","=","inputs",",","name","=","b'loop'",")","return","outputs","(","compile_op",",","dummy_predict_op",",",")","=","tpu",".","split_compile_and_shard","(","multi_tpu_predict_steps_on_single_shard",",","inputs","=","[","]",",","num_shards","=","ctx",".","num_replicas",",","outputs_from_all_shards","=","False",",","device_assignment","=","ctx",".","device_assignment",")","dummy_predict_op","=","dummy_predict_op","[","0","]","scaffold","=","_get_scaffold","(","captured_scaffold_fn",")","return","(","compile_op",",","dummy_predict_op",",","host_calls",",","scaffold",",","captured_predict_hooks",".","get","(",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2939-L2966"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_wrap_computation_in_while_loop","parameters":"(device, op_fn)","argument_list":"","return_statement":"","docstring":"Wraps the ops generated by `op_fn` in tf.while_loop.","docstring_summary":"Wraps the ops generated by `op_fn` in tf.while_loop.","docstring_tokens":["Wraps","the","ops","generated","by","op_fn","in","tf",".","while_loop","."],"function":"def _wrap_computation_in_while_loop(device, op_fn):\n \"\"\"Wraps the ops generated by `op_fn` in tf.while_loop.\"\"\"\n\n def computation(i):\n with ops.control_dependencies(op_fn()):\n return i + 1\n\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n # By setting parallel_iterations=1, the parallel execution in while_loop is\n # basically turned off.\n with ops.device(device):\n iterations = array_ops.identity(iterations_per_loop_var)\n return control_flow_ops.while_loop(\n lambda i: i < iterations,\n computation, [constant_op.constant(0)],\n parallel_iterations=1)","function_tokens":["def","_wrap_computation_in_while_loop","(","device",",","op_fn",")",":","def","computation","(","i",")",":","with","ops",".","control_dependencies","(","op_fn","(",")",")",":","return","i","+","1","iterations_per_loop_var","=","_create_or_get_iterations_per_loop","(",")","# By setting parallel_iterations=1, the parallel execution in while_loop is","# basically turned off.","with","ops",".","device","(","device",")",":","iterations","=","array_ops",".","identity","(","iterations_per_loop_var",")","return","control_flow_ops",".","while_loop","(","lambda","i",":","i","<","iterations",",","computation",",","[","constant_op",".","constant","(","0",")","]",",","parallel_iterations","=","1",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2969-L2984"} 
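The `_wrap_computation_in_while_loop` record above and the `_wrap_computation_in_while_loop_with_stopping_signals` record below document the same trick: the ops produced by `op_fn` are re-issued inside a `tf.while_loop`, so a single Session.run call drives a whole loop's worth of host-side steps instead of paying one run-call round trip per step. A minimal sketch of that pattern, using the public TensorFlow 1.x graph-mode API rather than this file's internal `ops`/`control_flow_ops`/`math_ops` modules; `op_fn` and `iterations` here are caller-supplied stand-ins, not names from the records:

import tensorflow as tf  # TF 1.x graph-mode API, contemporary with these records

def wrap_in_while_loop(op_fn, iterations):
  """Runs the ops returned by op_fn `iterations` times in one while_loop."""
  def computation(i):
    # The control dependency forces op_fn's ops to execute before the
    # counter increment that carries the loop forward.
    with tf.control_dependencies(op_fn()):
      return i + 1
  # parallel_iterations=1 serializes the loop body; as the record above
  # notes, parallel execution in while_loop is "basically turned off".
  return tf.while_loop(lambda i: i < iterations, computation,
                       [tf.constant(0)], parallel_iterations=1)

In the records themselves, `iterations` comes from the shared `iterations_per_loop` variable, and the stopping-signals variant swaps the integer counter for a scalar stopping signal tested by `_StopSignals.should_stop`.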
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_wrap_computation_in_while_loop_with_stopping_signals","parameters":"(device, op_fn)","argument_list":"","return_statement":"","docstring":"Wraps the ops generated by `op_fn` in tf.while_loop.","docstring_summary":"Wraps the ops generated by `op_fn` in tf.while_loop.","docstring_tokens":["Wraps","the","ops","generated","by","op_fn","in","tf",".","while_loop","."],"function":"def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):\n \"\"\"Wraps the ops generated by `op_fn` in tf.while_loop.\"\"\"\n\n def cond(scalar_stopping_signal):\n return math_ops.logical_not(\n _StopSignals.should_stop(scalar_stopping_signal))\n\n def computation(unused_scalar_stopping_signal):\n return_value = op_fn()\n execute_ops = return_value['ops']\n signals = return_value['signals']\n with ops.control_dependencies(execute_ops):\n return _StopSignals.as_scalar_stopping_signal(signals)\n\n # By setting parallel_iterations=1, the parallel execution in while_loop is\n # basically turned off.\n with ops.device(device):\n return control_flow_ops.while_loop(\n cond,\n computation, [_StopSignals.NON_STOPPING_SIGNAL],\n parallel_iterations=1)","function_tokens":["def","_wrap_computation_in_while_loop_with_stopping_signals","(","device",",","op_fn",")",":","def","cond","(","scalar_stopping_signal",")",":","return","math_ops",".","logical_not","(","_StopSignals",".","should_stop","(","scalar_stopping_signal",")",")","def","computation","(","unused_scalar_stopping_signal",")",":","return_value","=","op_fn","(",")","execute_ops","=","return_value","[","'ops'","]","signals","=","return_value","[","'signals'","]","with","ops",".","control_dependencies","(","execute_ops",")",":","return","_StopSignals",".","as_scalar_stopping_signal","(","signals",")","# By setting parallel_iterations=1, the parallel execution in while_loop is","# basically turned off.","with","ops",".","device","(","device",")",":","return","control_flow_ops",".","while_loop","(","cond",",","computation",",","[","_StopSignals",".","NON_STOPPING_SIGNAL","]",",","parallel_iterations","=","1",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2987-L3007"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_validate_tpu_training_graph","parameters":"()","argument_list":"","return_statement":"","docstring":"Validate graph before running distributed training.\n\n Raises:\n ValueError: If the graph seems invalid for running on device","docstring_summary":"Validate graph before running distributed training.","docstring_tokens":["Validate","graph","before","running","distributed","training","."],"function":"def _validate_tpu_training_graph():\n \"\"\"Validate graph before running distributed training.\n\n Raises:\n ValueError: If the graph seems invalid for running on device\n \"\"\"\n operations = ops.get_default_graph().get_operations()\n\n # Check if there is atleast one CrossReplicaSum operation in the graph\n # This should be introduced by using the CrossShardOptimizer wrapper\n cross_replica_sum_ops = [\n o for o in operations if o.type == _CROSS_REPLICA_SUM_OP\n ]\n if not cross_replica_sum_ops:\n raise ValueError(\n 'CrossShardOptimizer must be used for model training on 
TPUs.')","function_tokens":["def","_validate_tpu_training_graph","(",")",":","operations","=","ops",".","get_default_graph","(",")",".","get_operations","(",")","# Check if there is atleast one CrossReplicaSum operation in the graph","# This should be introduced by using the CrossShardOptimizer wrapper","cross_replica_sum_ops","=","[","o","for","o","in","operations","if","o",".","type","==","_CROSS_REPLICA_SUM_OP","]","if","not","cross_replica_sum_ops",":","raise","ValueError","(","'CrossShardOptimizer must be used for model training on TPUs.'",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3010-L3025"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_get_scaffold","parameters":"(captured_scaffold_fn)","argument_list":"","return_statement":"return scaffold","docstring":"Retrieves the Scaffold from `captured_scaffold_fn`.","docstring_summary":"Retrieves the Scaffold from `captured_scaffold_fn`.","docstring_tokens":["Retrieves","the","Scaffold","from","captured_scaffold_fn","."],"function":"def _get_scaffold(captured_scaffold_fn):\n \"\"\"Retrieves the Scaffold from `captured_scaffold_fn`.\"\"\"\n with _CapturingContext(message='Inside scaffold_fn'):\n scaffold_fn = captured_scaffold_fn.get()\n if scaffold_fn:\n scaffold = scaffold_fn()\n if scaffold is None:\n raise ValueError(\n 'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')\n else:\n scaffold = None\n\n if scaffold:\n wrapped_finalize = scaffold.finalize\n\n def _finalize():\n with _CapturingContext('Inside Scaffold.finalize'):\n wrapped_finalize()\n\n scaffold.finalize = _finalize\n return scaffold","function_tokens":["def","_get_scaffold","(","captured_scaffold_fn",")",":","with","_CapturingContext","(","message","=","'Inside scaffold_fn'",")",":","scaffold_fn","=","captured_scaffold_fn",".","get","(",")","if","scaffold_fn",":","scaffold","=","scaffold_fn","(",")","if","scaffold","is","None",":","raise","ValueError","(","'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed'",")","else",":","scaffold","=","None","if","scaffold",":","wrapped_finalize","=","scaffold",".","finalize","def","_finalize","(",")",":","with","_CapturingContext","(","'Inside Scaffold.finalize'",")",":","wrapped_finalize","(",")","scaffold",".","finalize","=","_finalize","return","scaffold"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3055-L3075"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_add_item_to_params","parameters":"(params, key, value)","argument_list":"","return_statement":"","docstring":"Adds a new item into `params`.","docstring_summary":"Adds a new item into `params`.","docstring_tokens":["Adds","a","new","item","into","params","."],"function":"def _add_item_to_params(params, key, value):\n \"\"\"Adds a new item into `params`.\"\"\"\n if isinstance(params, hparam.HParams):\n # For HParams, we need to use special API.\n if key in params:\n params.set_hparam(key, value)\n else:\n params.add_hparam(key, value)\n else:\n # Now params is Python dict.\n params[key] = 
value","function_tokens":["def","_add_item_to_params","(","params",",","key",",","value",")",":","if","isinstance","(","params",",","hparam",".","HParams",")",":","# For HParams, we need to use special API.","if","key","in","params",":","params",".","set_hparam","(","key",",","value",")","else",":","params",".","add_hparam","(","key",",","value",")","else",":","# Now params is Python dict.","params","[","key","]","=","value"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3470-L3480"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"export_estimator_savedmodel","parameters":"(estimator,\n export_dir_base,\n serving_input_receiver_fn,\n assets_extra=None,\n as_text=False,\n checkpoint_path=None,\n strip_default_attrs=False)","argument_list":"","return_statement":"return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,\n assets_extra, as_text, checkpoint_path,\n strip_default_attrs)","docstring":"Export `Estimator` trained model for TPU inference.\n\n Args:\n estimator: `Estimator` with which model has been trained.\n export_dir_base: A string containing a directory in which to create\n timestamped subdirectories containing exported SavedModels.\n serving_input_receiver_fn: A function that takes no argument and returns a\n `ServingInputReceiver` or `TensorServingInputReceiver`.\n assets_extra: A dict specifying how to populate the assets.extra directory\n within the exported SavedModel, or `None` if no extra assets are needed.\n as_text: whether to write the SavedModel proto in text format.\n checkpoint_path: The checkpoint path to export. If `None` (the default),\n the most recent checkpoint found within the model directory is chosen.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the NodeDefs.\n\n Returns:\n The string path to the exported directory.","docstring_summary":"Export `Estimator` trained model for TPU inference.","docstring_tokens":["Export","Estimator","trained","model","for","TPU","inference","."],"function":"def export_estimator_savedmodel(estimator,\n export_dir_base,\n serving_input_receiver_fn,\n assets_extra=None,\n as_text=False,\n checkpoint_path=None,\n strip_default_attrs=False):\n \"\"\"Export `Estimator` trained model for TPU inference.\n\n Args:\n estimator: `Estimator` with which model has been trained.\n export_dir_base: A string containing a directory in which to create\n timestamped subdirectories containing exported SavedModels.\n serving_input_receiver_fn: A function that takes no argument and returns a\n `ServingInputReceiver` or `TensorServingInputReceiver`.\n assets_extra: A dict specifying how to populate the assets.extra directory\n within the exported SavedModel, or `None` if no extra assets are needed.\n as_text: whether to write the SavedModel proto in text format.\n checkpoint_path: The checkpoint path to export. If `None` (the default),\n the most recent checkpoint found within the model directory is chosen.\n strip_default_attrs: Boolean. 
If `True`, default-valued attributes will be\n removed from the NodeDefs.\n\n Returns:\n The string path to the exported directory.\n \"\"\"\n # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use\n # `estimator.config`.\n config = tpu_config.RunConfig(model_dir=estimator.model_dir)\n est = TPUEstimator(\n estimator._model_fn, # pylint: disable=protected-access\n config=config,\n params=estimator.params,\n use_tpu=True,\n train_batch_size=2048, # Does not matter.\n eval_batch_size=2048, # Does not matter.\n )\n return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,\n assets_extra, as_text, checkpoint_path,\n strip_default_attrs)","function_tokens":["def","export_estimator_savedmodel","(","estimator",",","export_dir_base",",","serving_input_receiver_fn",",","assets_extra","=","None",",","as_text","=","False",",","checkpoint_path","=","None",",","strip_default_attrs","=","False",")",":","# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use","# `estimator.config`.","config","=","tpu_config",".","RunConfig","(","model_dir","=","estimator",".","model_dir",")","est","=","TPUEstimator","(","estimator",".","_model_fn",",","# pylint: disable=protected-access","config","=","config",",","params","=","estimator",".","params",",","use_tpu","=","True",",","train_batch_size","=","2048",",","# Does not matter.","eval_batch_size","=","2048",",","# Does not matter.",")","return","est",".","export_savedmodel","(","export_dir_base",",","serving_input_receiver_fn",",","assets_extra",",","as_text",",","checkpoint_path",",","strip_default_attrs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3483-L3522"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"TPUEstimatorSpec.__new__","parameters":"(cls,\n mode,\n predictions=None,\n loss=None,\n train_op=None,\n eval_metrics=None,\n export_outputs=None,\n scaffold_fn=None,\n host_call=None,\n training_hooks=None,\n evaluation_hooks=None,\n prediction_hooks=None)","argument_list":"","return_statement":"return super(TPUEstimatorSpec, cls).__new__(\n cls,\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metrics=eval_metrics,\n export_outputs=export_outputs,\n scaffold_fn=scaffold_fn,\n host_call=host_call,\n training_hooks=training_hooks,\n evaluation_hooks=evaluation_hooks,\n prediction_hooks=prediction_hooks)","docstring":"Creates a validated `TPUEstimatorSpec` instance.","docstring_summary":"Creates a validated `TPUEstimatorSpec` instance.","docstring_tokens":["Creates","a","validated","TPUEstimatorSpec","instance","."],"function":"def __new__(cls,\n mode,\n predictions=None,\n loss=None,\n train_op=None,\n eval_metrics=None,\n export_outputs=None,\n scaffold_fn=None,\n host_call=None,\n training_hooks=None,\n evaluation_hooks=None,\n prediction_hooks=None):\n \"\"\"Creates a validated `TPUEstimatorSpec` instance.\"\"\"\n host_calls = {}\n if eval_metrics is not None:\n host_calls['eval_metrics'] = eval_metrics\n if host_call is not None:\n host_calls['host_call'] = host_call\n _OutfeedHostCall.validate(host_calls)\n\n training_hooks = tuple(training_hooks or [])\n evaluation_hooks = tuple(evaluation_hooks or [])\n prediction_hooks = tuple(prediction_hooks or [])\n\n for hook in training_hooks + evaluation_hooks + prediction_hooks:\n if not isinstance(hook, 
session_run_hook.SessionRunHook):\n raise TypeError('All hooks must be SessionRunHook instances, given: {}'\n .format(hook))\n\n return super(TPUEstimatorSpec, cls).__new__(\n cls,\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metrics=eval_metrics,\n export_outputs=export_outputs,\n scaffold_fn=scaffold_fn,\n host_call=host_call,\n training_hooks=training_hooks,\n evaluation_hooks=evaluation_hooks,\n prediction_hooks=prediction_hooks)","function_tokens":["def","__new__","(","cls",",","mode",",","predictions","=","None",",","loss","=","None",",","train_op","=","None",",","eval_metrics","=","None",",","export_outputs","=","None",",","scaffold_fn","=","None",",","host_call","=","None",",","training_hooks","=","None",",","evaluation_hooks","=","None",",","prediction_hooks","=","None",")",":","host_calls","=","{","}","if","eval_metrics","is","not","None",":","host_calls","[","'eval_metrics'","]","=","eval_metrics","if","host_call","is","not","None",":","host_calls","[","'host_call'","]","=","host_call","_OutfeedHostCall",".","validate","(","host_calls",")","training_hooks","=","tuple","(","training_hooks","or","[","]",")","evaluation_hooks","=","tuple","(","evaluation_hooks","or","[","]",")","prediction_hooks","=","tuple","(","prediction_hooks","or","[","]",")","for","hook","in","training_hooks","+","evaluation_hooks","+","prediction_hooks",":","if","not","isinstance","(","hook",",","session_run_hook",".","SessionRunHook",")",":","raise","TypeError","(","'All hooks must be SessionRunHook instances, given: {}'",".","format","(","hook",")",")","return","super","(","TPUEstimatorSpec",",","cls",")",".","__new__","(","cls",",","mode","=","mode",",","predictions","=","predictions",",","loss","=","loss",",","train_op","=","train_op",",","eval_metrics","=","eval_metrics",",","export_outputs","=","export_outputs",",","scaffold_fn","=","scaffold_fn",",","host_call","=","host_call",",","training_hooks","=","training_hooks",",","evaluation_hooks","=","evaluation_hooks",",","prediction_hooks","=","prediction_hooks",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L283-L324"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"TPUEstimatorSpec.as_estimator_spec","parameters":"(self)","argument_list":"","return_statement":"return model_fn_lib.EstimatorSpec(\n mode=self.mode,\n predictions=self.predictions,\n loss=self.loss,\n train_op=self.train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=self.export_outputs,\n scaffold=scaffold,\n training_hooks=self.training_hooks + hooks,\n evaluation_hooks=self.evaluation_hooks + hooks,\n prediction_hooks=self.prediction_hooks + hooks)","docstring":"Creates an equivalent `EstimatorSpec` used by CPU train\/eval.","docstring_summary":"Creates an equivalent `EstimatorSpec` used by CPU train\/eval.","docstring_tokens":["Creates","an","equivalent","EstimatorSpec","used","by","CPU","train","\/","eval","."],"function":"def as_estimator_spec(self):\n \"\"\"Creates an equivalent `EstimatorSpec` used by CPU train\/eval.\"\"\"\n host_calls = {}\n if self.eval_metrics is not None:\n host_calls['eval_metrics'] = self.eval_metrics\n if self.host_call is not None:\n host_calls['host_call'] = self.host_call\n host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)\n eval_metric_ops = None\n if self.eval_metrics is not None:\n 
eval_metric_ops = host_call_ret['eval_metrics']\n hooks = None\n if self.host_call is not None:\n hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]\n if tensor_tracer.TensorTracer.is_enabled():\n tt = tensor_tracer.TensorTracer()\n tracing_calls = tt.trace_cpu(ops.get_default_graph())\n tracing_call_ret = _OutfeedHostCall.create_cpu_hostcall(tracing_calls)\n tracing_functions = tracing_call_ret.values()\n if tracing_functions:\n if hooks:\n hooks.extend([_OutfeedHostCallHook(tracing_functions)])\n else:\n hooks = [_OutfeedHostCallHook(tracing_functions)]\n hooks = tuple(hooks or [])\n scaffold = self.scaffold_fn() if self.scaffold_fn else None\n return model_fn_lib.EstimatorSpec(\n mode=self.mode,\n predictions=self.predictions,\n loss=self.loss,\n train_op=self.train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=self.export_outputs,\n scaffold=scaffold,\n training_hooks=self.training_hooks + hooks,\n evaluation_hooks=self.evaluation_hooks + hooks,\n prediction_hooks=self.prediction_hooks + hooks)","function_tokens":["def","as_estimator_spec","(","self",")",":","host_calls","=","{","}","if","self",".","eval_metrics","is","not","None",":","host_calls","[","'eval_metrics'","]","=","self",".","eval_metrics","if","self",".","host_call","is","not","None",":","host_calls","[","'host_call'","]","=","self",".","host_call","host_call_ret","=","_OutfeedHostCall",".","create_cpu_hostcall","(","host_calls",")","eval_metric_ops","=","None","if","self",".","eval_metrics","is","not","None",":","eval_metric_ops","=","host_call_ret","[","'eval_metrics'","]","hooks","=","None","if","self",".","host_call","is","not","None",":","hooks","=","[","_OutfeedHostCallHook","(","host_call_ret","[","'host_call'","]",")","]","if","tensor_tracer",".","TensorTracer",".","is_enabled","(",")",":","tt","=","tensor_tracer",".","TensorTracer","(",")","tracing_calls","=","tt",".","trace_cpu","(","ops",".","get_default_graph","(",")",")","tracing_call_ret","=","_OutfeedHostCall",".","create_cpu_hostcall","(","tracing_calls",")","tracing_functions","=","tracing_call_ret",".","values","(",")","if","tracing_functions",":","if","hooks",":","hooks",".","extend","(","[","_OutfeedHostCallHook","(","tracing_functions",")","]",")","else",":","hooks","=","[","_OutfeedHostCallHook","(","tracing_functions",")","]","hooks","=","tuple","(","hooks","or","[","]",")","scaffold","=","self",".","scaffold_fn","(",")","if","self",".","scaffold_fn","else","None","return","model_fn_lib",".","EstimatorSpec","(","mode","=","self",".","mode",",","predictions","=","self",".","predictions",",","loss","=","self",".","loss",",","train_op","=","self",".","train_op",",","eval_metric_ops","=","eval_metric_ops",",","export_outputs","=","self",".","export_outputs",",","scaffold","=","scaffold",",","training_hooks","=","self",".","training_hooks","+","hooks",",","evaluation_hooks","=","self",".","evaluation_hooks","+","hooks",",","prediction_hooks","=","self",".","prediction_hooks","+","hooks",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L326-L362"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_TPUStopAtStepHook.__init__","parameters":"(self, iterations, num_steps=None, last_step=None)","argument_list":"","return_statement":"","docstring":"Initializes a `StopAtStepHook`.\n\n Args:\n iterations: The number of iterations to run 
optimizer per training loop.\n num_steps: Number of steps to execute.\n last_step: Step after which to stop.\n\n Raises:\n ValueError: If one of the arguments is invalid.","docstring_summary":"Initializes a `StopAtStepHook`.","docstring_tokens":["Initializes","a","StopAtStepHook","."],"function":"def __init__(self, iterations, num_steps=None, last_step=None):\n \"\"\"Initializes a `StopAtStepHook`.\n\n Args:\n iterations: The number of iterations to run optimizer per training loop.\n num_steps: Number of steps to execute.\n last_step: Step after which to stop.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n \"\"\"\n if num_steps is None and last_step is None:\n raise ValueError('One of num_steps or last_step must be specified.')\n if num_steps is not None and last_step is not None:\n raise ValueError('Only one of num_steps or last_step can be specified.')\n self._num_steps = num_steps\n self._last_step = last_step\n self._iterations = iterations","function_tokens":["def","__init__","(","self",",","iterations",",","num_steps","=","None",",","last_step","=","None",")",":","if","num_steps","is","None","and","last_step","is","None",":","raise","ValueError","(","'One of num_steps or last_step must be specified.'",")","if","num_steps","is","not","None","and","last_step","is","not","None",":","raise","ValueError","(","'Only one of num_steps or last_step can be specified.'",")","self",".","_num_steps","=","num_steps","self",".","_last_step","=","last_step","self",".","_iterations","=","iterations"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L593-L610"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_SetEvalIterationsHook.__init__","parameters":"(self, num_steps)","argument_list":"","return_statement":"","docstring":"Initializes a `_SetEvalIterationsHook`.\n\n Args:\n num_steps: Number of steps to execute.","docstring_summary":"Initializes a `_SetEvalIterationsHook`.","docstring_tokens":["Initializes","a","_SetEvalIterationsHook","."],"function":"def __init__(self, num_steps):\n \"\"\"Initializes a `_SetEvalIterationsHook`.\n\n Args:\n num_steps: Number of steps to execute.\n \"\"\"\n self._num_steps = num_steps","function_tokens":["def","__init__","(","self",",","num_steps",")",":","self",".","_num_steps","=","num_steps"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L647-L653"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_InputPipeline.__init__","parameters":"(self, input_fn, batch_axis, ctx)","argument_list":"","return_statement":"","docstring":"Constructor.\n\n Args:\n input_fn: input fn for train or eval.\n batch_axis: A python tuple of int values describing how each tensor\n produced by the Estimator `input_fn` should be split across the TPU\n compute shards.\n ctx: A `_InternalTPUContext` instance with mode.\n\n Raises:\n ValueError: If both `sharded_features` and `num_cores` are `None`.","docstring_summary":"Constructor.","docstring_tokens":["Constructor","."],"function":"def __init__(self, input_fn, batch_axis, ctx):\n \"\"\"Constructor.\n\n Args:\n input_fn: input fn for train or eval.\n batch_axis: A python tuple of int values describing how each 
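`_TPUStopAtStepHook` is internal, but its argument contract is simple: exactly one of `num_steps` or `last_step`. A plain-Python mirror of the check, runnable on its own:

def check_stop_args(num_steps=None, last_step=None):
  # Mirrors _TPUStopAtStepHook.__init__'s mutual-exclusion validation.
  if num_steps is None and last_step is None:
    raise ValueError('One of num_steps or last_step must be specified.')
  if num_steps is not None and last_step is not None:
    raise ValueError('Only one of num_steps or last_step can be specified.')

check_stop_args(num_steps=1000)   # OK: run 1000 more steps
check_stop_args(last_step=50000)  # OK: stop once the global step reaches 50000
# check_stop_args()                         -> ValueError
# check_stop_args(num_steps=1, last_step=2) -> ValueError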
tensor\n produced by the Estimator `input_fn` should be split across the TPU\n compute shards.\n ctx: A `_InternalTPUContext` instance with mode.\n\n Raises:\n ValueError: If both `sharded_features` and `num_cores` are `None`.\n \"\"\"\n self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(\n ctx.input_partition_dims)\n\n self._sharded_per_core = ctx.is_input_sharded_per_core()\n self._input_fn = input_fn\n self._infeed_queue = None\n self._ctx = ctx\n self._batch_axis = batch_axis","function_tokens":["def","__init__","(","self",",","input_fn",",","batch_axis",",","ctx",")",":","self",".","_inputs_structure_recorder","=","_InputPipeline",".","InputsStructureRecorder","(","ctx",".","input_partition_dims",")","self",".","_sharded_per_core","=","ctx",".","is_input_sharded_per_core","(",")","self",".","_input_fn","=","input_fn","self",".","_infeed_queue","=","None","self",".","_ctx","=","ctx","self",".","_batch_axis","=","batch_axis"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1139-L1159"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_InputPipeline.generate_infeed_enqueue_ops_and_dequeue_fn","parameters":"(self)","argument_list":"","return_statement":"return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)","docstring":"Generates infeed enqueue ops and dequeue_fn.","docstring_summary":"Generates infeed enqueue ops and dequeue_fn.","docstring_tokens":["Generates","infeed","enqueue","ops","and","dequeue_fn","."],"function":"def generate_infeed_enqueue_ops_and_dequeue_fn(self):\n \"\"\"Generates infeed enqueue ops and dequeue_fn.\"\"\"\n # While tf.while_loop is called, the body function, which invokes\n # `enqueue_fn` passed in, is called to construct the graph. So, input_fn\n # structure is recorded.\n enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (\n self._invoke_input_fn_and_record_structure())\n\n self._validate_input_pipeline()\n\n def dequeue_fn():\n \"\"\"dequeue_fn is used by TPU to retrieve the tensors.\"\"\"\n # In the model-parallel case, both the host-side and device-side\n # computations must agree on the core on which infeed takes place. We\n # choose to perform infeed on logical core 0 of each replica.\n values = self._infeed_queue.generate_dequeue_op(tpu_device=0)\n # The unflatten process uses the structure information recorded above.\n return self._inputs_structure_recorder.unflatten_features_and_labels(\n values)\n\n return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)","function_tokens":["def","generate_infeed_enqueue_ops_and_dequeue_fn","(","self",")",":","# While tf.while_loop is called, the body function, which invokes","# `enqueue_fn` passed in, is called to construct the graph. So, input_fn","# structure is recorded.","enqueue_ops",",","all_hooks",",","run_infeed_loop_on_coordinator","=","(","self",".","_invoke_input_fn_and_record_structure","(",")",")","self",".","_validate_input_pipeline","(",")","def","dequeue_fn","(",")",":","\"\"\"dequeue_fn is used by TPU to retrieve the tensors.\"\"\"","# In the model-parallel case, both the host-side and device-side","# computations must agree on the core on which infeed takes place. 
We","# choose to perform infeed on logical core 0 of each replica.","values","=","self",".","_infeed_queue",".","generate_dequeue_op","(","tpu_device","=","0",")","# The unflatten process uses the structure information recorded above.","return","self",".","_inputs_structure_recorder",".","unflatten_features_and_labels","(","values",")","return","(","enqueue_ops",",","dequeue_fn",",","all_hooks",",","run_infeed_loop_on_coordinator",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1161-L1181"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_InputPipeline._invoke_input_fn_and_record_structure","parameters":"(self)","argument_list":"","return_statement":"return enqueue_ops, [\n util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)\n ], run_infeed_loop_on_coordinator","docstring":"Deploys the input pipeline and record input structure.","docstring_summary":"Deploys the input pipeline and record input structure.","docstring_tokens":["Deploys","the","input","pipeline","and","record","input","structure","."],"function":"def _invoke_input_fn_and_record_structure(self):\n \"\"\"Deploys the input pipeline and record input structure.\"\"\"\n enqueue_ops = []\n infeed_queues = []\n all_dataset_initializers = []\n num_hosts = self._ctx.num_hosts\n tpu_host_placement_fn = self._ctx.tpu_host_placement_function\n\n run_infeed_loop_on_coordinator = True\n\n if self._sharded_per_core:\n # Per-Core input pipeline deployment.\n # Invoke input pipeline for each core and placed on the corresponding\n # host.\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n enqueue_ops_fn, captured_infeed_queue = (\n generate_per_core_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn, self._inputs_structure_recorder,\n host_device, host_id))\n\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n run_infeed_loop_on_coordinator = False\n enqueue_ops.append(\n _wrap_computation_in_while_loop(\n device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n # Infeed_queue_getter must be called after enqueue_ops_fn is called.\n infeed_queues.append(captured_infeed_queue.get())\n\n elif self._ctx.is_input_broadcast_with_iterators():\n # Only calls input_fn in host 0.\n host_device = tpu_host_placement_fn(host_id=0)\n enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (\n generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,\n self._inputs_structure_recorder,\n num_hosts))\n if dataset_initializer:\n all_dataset_initializers.append(dataset_initializer)\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n else:\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n if self._ctx.is_input_per_host_with_iterators():\n enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (\n 
generate_per_host_v2_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, host_device, host_id))\n else:\n enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (\n generate_per_host_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, self._batch_axis,\n host_device, host_id))\n\n # NOTE(xiejw): We dispatch here based on the return type of the\n # users `input_fn`.\n #\n # 1. If input_fn returns a Dataset instance, we initialize the\n # iterator outside of tf.while_loop, and call the iterator.get_next\n # inside tf.while_loop. This should be always safe.\n #\n # 2. If input_fn returns (features, labels), it is too late to wrap\n # them inside tf.while_loop, as resource initialization cannot be\n # handled in TF control flow properly. In this case, we will use\n # python loop to enqueue the data into TPU system. This may be\n # slow compared to the previous case.\n if dataset_initializer:\n all_dataset_initializers.append(dataset_initializer)\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(\n wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n # infeed_queue is used to generate dequeue ops. The only thing it uses for\n # dequeue is dtypes and types. So, any one can be used. Here, grab the\n # first one.\n self._infeed_queue = infeed_queues[0]\n return enqueue_ops, [\n util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)\n ], run_infeed_loop_on_coordinator","function_tokens":["def","_invoke_input_fn_and_record_structure","(","self",")",":","enqueue_ops","=","[","]","infeed_queues","=","[","]","all_dataset_initializers","=","[","]","num_hosts","=","self",".","_ctx",".","num_hosts","tpu_host_placement_fn","=","self",".","_ctx",".","tpu_host_placement_function","run_infeed_loop_on_coordinator","=","True","if","self",".","_sharded_per_core",":","# Per-Core input pipeline deployment.","# Invoke input pipeline for each core and placed on the corresponding","# host.","for","host_id","in","range","(","num_hosts",")",":","host_device","=","tpu_host_placement_fn","(","host_id","=","host_id",")","with","ops",".","device","(","host_device",")",":","with","ops",".","name_scope","(","'input_pipeline_task%d'","%","(","host_id",")",")",":","enqueue_ops_fn",",","captured_infeed_queue","=","(","generate_per_core_enqueue_ops_fn_for_host","(","self",".","_ctx",",","self",".","_input_fn",",","self",".","_inputs_structure_recorder",",","host_device",",","host_id",")",")","if","_WRAP_INPUT_FN_INTO_WHILE_LOOP",":","run_infeed_loop_on_coordinator","=","False","enqueue_ops",".","append","(","_wrap_computation_in_while_loop","(","device","=","host_device",",","op_fn","=","enqueue_ops_fn",")",")","else",":","enqueue_ops",".","append","(","enqueue_ops_fn","(",")",")","# Infeed_queue_getter must be called after enqueue_ops_fn is called.","infeed_queues",".","append","(","captured_infeed_queue",".","get","(",")",")","elif","self",".","_ctx",".","is_input_broadcast_with_iterators","(",")",":","# Only calls input_fn in host 
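The note above prefers `input_fn`s that return a `tf.data.Dataset`, because the iterator can then be initialized outside `tf.while_loop`. A minimal sketch of such an `input_fn`; the feature name and toy tensors are illustrative. `TPUEstimator` injects the per-host batch size as `params['batch_size']`, and `drop_remainder=True` keeps the batch dimension static, which the infeed requires.

import tensorflow as tf

def input_fn(params):
  batch_size = params['batch_size']  # injected by TPUEstimator
  dataset = tf.data.Dataset.from_tensor_slices(
      ({'x': tf.random_uniform([1024, 8])}, tf.zeros([1024], tf.int32)))
  dataset = dataset.repeat().shuffle(1024)
  # drop_remainder=True yields a fully-defined batch dimension.
  return dataset.batch(batch_size, drop_remainder=True)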
0.","host_device","=","tpu_host_placement_fn","(","host_id","=","0",")","enqueue_ops_fn",",","captured_infeed_queue",",","dataset_initializer","=","(","generate_broadcast_enqueue_ops_fn","(","self",".","_ctx",",","self",".","_input_fn",",","self",".","_inputs_structure_recorder",",","num_hosts",")",")","if","dataset_initializer",":","all_dataset_initializers",".","append","(","dataset_initializer",")","run_infeed_loop_on_coordinator","=","False","wrap_fn","=","(","_wrap_computation_in_while_loop","if","self",".","_ctx",".","mode","!=","model_fn_lib",".","ModeKeys",".","PREDICT","else","_wrap_computation_in_while_loop_with_stopping_signals",")","enqueue_ops",".","append","(","wrap_fn","(","device","=","host_device",",","op_fn","=","enqueue_ops_fn",")",")","else",":","enqueue_ops",".","append","(","enqueue_ops_fn","(",")",")","infeed_queues",".","append","(","captured_infeed_queue",".","get","(",")",")","else",":","for","host_id","in","range","(","num_hosts",")",":","host_device","=","tpu_host_placement_fn","(","host_id","=","host_id",")","with","ops",".","device","(","host_device",")",":","with","ops",".","name_scope","(","'input_pipeline_task%d'","%","(","host_id",")",")",":","if","self",".","_ctx",".","is_input_per_host_with_iterators","(",")",":","enqueue_ops_fn",",","captured_infeed_queue",",","dataset_initializer","=","(","generate_per_host_v2_enqueue_ops_fn_for_host","(","self",".","_ctx",",","self",".","_input_fn",",","self",".","_inputs_structure_recorder",",","host_device",",","host_id",")",")","else",":","enqueue_ops_fn",",","captured_infeed_queue",",","dataset_initializer","=","(","generate_per_host_enqueue_ops_fn_for_host","(","self",".","_ctx",",","self",".","_input_fn",",","self",".","_inputs_structure_recorder",",","self",".","_batch_axis",",","host_device",",","host_id",")",")","# NOTE(xiejw): We dispatch here based on the return type of the","# users `input_fn`.","#","# 1. If input_fn returns a Dataset instance, we initialize the","# iterator outside of tf.while_loop, and call the iterator.get_next","# inside tf.while_loop. This should be always safe.","#","# 2. If input_fn returns (features, labels), it is too late to wrap","# them inside tf.while_loop, as resource initialization cannot be","# handled in TF control flow properly. In this case, we will use","# python loop to enqueue the data into TPU system. This may be","# slow compared to the previous case.","if","dataset_initializer",":","all_dataset_initializers",".","append","(","dataset_initializer",")","run_infeed_loop_on_coordinator","=","False","wrap_fn","=","(","_wrap_computation_in_while_loop","if","self",".","_ctx",".","mode","!=","model_fn_lib",".","ModeKeys",".","PREDICT","else","_wrap_computation_in_while_loop_with_stopping_signals",")","enqueue_ops",".","append","(","wrap_fn","(","device","=","host_device",",","op_fn","=","enqueue_ops_fn",")",")","else",":","enqueue_ops",".","append","(","enqueue_ops_fn","(",")",")","infeed_queues",".","append","(","captured_infeed_queue",".","get","(",")",")","# infeed_queue is used to generate dequeue ops. The only thing it uses for","# dequeue is dtypes and types. So, any one can be used. 
Here, grab the","# first one.","self",".","_infeed_queue","=","infeed_queues","[","0","]","return","enqueue_ops",",","[","util_lib",".","MultiHostDatasetInitializerHook","(","all_dataset_initializers",")","]",",","run_infeed_loop_on_coordinator"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1183-L1281"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_InputPipeline._validate_input_pipeline","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Validates the input pipeline.\n\n Perform some sanity checks to log user friendly information. We should\n error out to give users better error message. But, if\n _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break\n user code, so, log a warning.\n\n Raises:\n RuntimeError: If the validation failed.","docstring_summary":"Validates the input pipeline.","docstring_tokens":["Validates","the","input","pipeline","."],"function":"def _validate_input_pipeline(self):\n \"\"\"Validates the input pipeline.\n\n Perform some sanity checks to log user friendly information. We should\n error out to give users better error message. But, if\n _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break\n user code, so, log a warning.\n\n Raises:\n RuntimeError: If the validation failed.\n \"\"\"\n if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):\n err_msg = ('Input pipeline contains one or more QueueRunners. '\n 'It could be slow and not scalable. Please consider '\n 'converting your input pipeline to use `tf.data` instead (see '\n 'https:\/\/www.tensorflow.org\/guide\/datasets for '\n 'instructions.')\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n raise RuntimeError(err_msg)\n else:\n logging.warn(err_msg)","function_tokens":["def","_validate_input_pipeline","(","self",")",":","if","ops",".","get_default_graph","(",")",".","get_collection","(","ops",".","GraphKeys",".","QUEUE_RUNNERS",")",":","err_msg","=","(","'Input pipeline contains one or more QueueRunners. '","'It could be slow and not scalable. Please consider '","'converting your input pipeline to use `tf.data` instead (see '","'https:\/\/www.tensorflow.org\/guide\/datasets for '","'instructions.'",")","if","_WRAP_INPUT_FN_INTO_WHILE_LOOP",":","raise","RuntimeError","(","err_msg",")","else",":","logging",".","warn","(","err_msg",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1283-L1303"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_ModelFnWrapper.convert_to_single_tpu_train_step","parameters":"(self, dequeue_fn)","argument_list":"","return_statement":"return (train_step, host_call, captured_scaffold_fn,\n captured_training_hooks)","docstring":"Converts user provided model_fn` as a single train step on TPU.\n\n The user provided `model_fn` takes input tuple\n (features, labels) and produces the EstimatorSpec with train_op and loss for\n train `mode`. This usually represents a single train computation on CPU.\n\n For TPU training, a train (computation) step is first wrapped in a\n tf.while_loop control flow to repeat for many times and then replicated to\n all TPU shards. 
Besides the input should be taken from TPU infeed rather\n than input pipeline (input_fn) directly. To fit TPU loop and replicate\n pattern, the original train computation should be reformed, which is the\n returned `train_step`.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn\n representing the train step for TPU.","docstring_summary":"Converts user provided model_fn` as a single train step on TPU.","docstring_tokens":["Converts","user","provided","model_fn","as","a","single","train","step","on","TPU","."],"function":"def convert_to_single_tpu_train_step(self, dequeue_fn):\n \"\"\"Converts user provided model_fn` as a single train step on TPU.\n\n The user provided `model_fn` takes input tuple\n (features, labels) and produces the EstimatorSpec with train_op and loss for\n train `mode`. This usually represents a single train computation on CPU.\n\n For TPU training, a train (computation) step is first wrapped in a\n tf.while_loop control flow to repeat for many times and then replicated to\n all TPU shards. Besides the input should be taken from TPU infeed rather\n than input pipeline (input_fn) directly. To fit TPU loop and replicate\n pattern, the original train computation should be reformed, which is the\n returned `train_step`.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn\n representing the train step for TPU.\n \"\"\"\n\n host_call = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n captured_training_hooks = _CapturedObject()\n\n def train_step(loss, *cache):\n \"\"\"Training step function for use inside a while loop.\"\"\"\n del loss # unused; required in function signature.\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n\n # Consume the current cache\n estimator_spec = self._verify_estimator_spec(\n self._call_model_fn(features, labels, cache=cache))\n\n # Retrieve the new returned cache\n \"\"\"\n `cache` consists of a list of tensors, potentially empty (of length 0)\n \"\"\"\n cache = estimator_spec.cache\n loss, train_op = estimator_spec.loss, estimator_spec.train_op\n\n if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n captured_scaffold_fn.capture(estimator_spec.scaffold_fn)\n else:\n captured_scaffold_fn.capture(None)\n\n captured_training_hooks.capture(estimator_spec.training_hooks)\n\n tracing_ops = []\n if tensor_tracer.TensorTracer.is_enabled():\n tt = tensor_tracer.TensorTracer()\n loss, tracing_ops = tt.trace_tpu(ops.get_default_graph(), loss,\n self._ctx.num_replicas)\n\n # We must run train_op to update the variables prior to running the\n # outfeed.\n with ops.control_dependencies([train_op]+tracing_ops):\n host_call_outfeed_ops = []\n if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access\n and estimator_spec.host_call is not None):\n host_call.record({'host_call': estimator_spec.host_call})\n host_call_outfeed_ops = host_call.create_enqueue_op()\n with ops.control_dependencies(host_call_outfeed_ops):\n return [array_ops.identity(loss)] + cache\n\n return (train_step, host_call, captured_scaffold_fn,\n 
captured_training_hooks)","function_tokens":["def","convert_to_single_tpu_train_step","(","self",",","dequeue_fn",")",":","host_call","=","_OutfeedHostCall","(","self",".","_ctx",")","captured_scaffold_fn","=","_CapturedObject","(",")","captured_training_hooks","=","_CapturedObject","(",")","def","train_step","(","loss",",","*","cache",")",":","\"\"\"Training step function for use inside a while loop.\"\"\"","del","loss","# unused; required in function signature.","inputs","=","dequeue_fn","(",")","features",",","labels","=","inputs",".","features_and_labels","(",")","# Consume the current cache","estimator_spec","=","self",".","_verify_estimator_spec","(","self",".","_call_model_fn","(","features",",","labels",",","cache","=","cache",")",")","# Retrieve the new returned cache","\"\"\"\n `cache` consists of a list of tensors, potentially empty (of length 0)\n \"\"\"","cache","=","estimator_spec",".","cache","loss",",","train_op","=","estimator_spec",".","loss",",","estimator_spec",".","train_op","if","isinstance","(","estimator_spec",",","model_fn_lib",".","_TPUEstimatorSpec",")",":","# pylint: disable=protected-access","captured_scaffold_fn",".","capture","(","estimator_spec",".","scaffold_fn",")","else",":","captured_scaffold_fn",".","capture","(","None",")","captured_training_hooks",".","capture","(","estimator_spec",".","training_hooks",")","tracing_ops","=","[","]","if","tensor_tracer",".","TensorTracer",".","is_enabled","(",")",":","tt","=","tensor_tracer",".","TensorTracer","(",")","loss",",","tracing_ops","=","tt",".","trace_tpu","(","ops",".","get_default_graph","(",")",",","loss",",","self",".","_ctx",".","num_replicas",")","# We must run train_op to update the variables prior to running the","# outfeed.","with","ops",".","control_dependencies","(","[","train_op","]","+","tracing_ops",")",":","host_call_outfeed_ops","=","[","]","if","(","isinstance","(","estimator_spec",",","model_fn_lib",".","_TPUEstimatorSpec",")","# pylint: disable=protected-access","and","estimator_spec",".","host_call","is","not","None",")",":","host_call",".","record","(","{","'host_call'",":","estimator_spec",".","host_call","}",")","host_call_outfeed_ops","=","host_call",".","create_enqueue_op","(",")","with","ops",".","control_dependencies","(","host_call_outfeed_ops",")",":","return","[","array_ops",".","identity","(","loss",")","]","+","cache","return","(","train_step",",","host_call",",","captured_scaffold_fn",",","captured_training_hooks",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1327-L1396"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_ModelFnWrapper.convert_to_single_tpu_eval_step","parameters":"(self, dequeue_fn)","argument_list":"","return_statement":"return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks","docstring":"Converts user provided model_fn` as a single eval step on TPU.\n\n Similar to training, the user provided `model_fn` takes input tuple\n (features, labels) and produces the TPUEstimatorSpec with eval_metrics for\n eval `mode`. This usually represents a single evaluation computation on CPU.\n\n For TPU evaluation, a eval (computation) step is first wrapped in a\n tf.while_loop control flow to repeat for many times and then replicated to\n all TPU shards. Besides the input and output are slightly different. 
Input,\n features and labels, should be taken from TPU infeed rather than input\n pipeline (input_fn) directly. Output is managed in two stages. First, the\n model outputs as the result of evaluation computation, usually model logits,\n should be transferred from TPU system to CPU. Then, all model outputs are\n concatenated first on CPU and sent to the metric_fn for metrics computation.\n To fit TPU evaluation pattern, the original eval computation should be\n reformed, which is the returned `eval_step`.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn\n representing the eval step for TPU.","docstring_summary":"Converts user provided model_fn` as a single eval step on TPU.","docstring_tokens":["Converts","user","provided","model_fn","as","a","single","eval","step","on","TPU","."],"function":"def convert_to_single_tpu_eval_step(self, dequeue_fn):\n \"\"\"Converts user provided model_fn` as a single eval step on TPU.\n\n Similar to training, the user provided `model_fn` takes input tuple\n (features, labels) and produces the TPUEstimatorSpec with eval_metrics for\n eval `mode`. This usually represents a single evaluation computation on CPU.\n\n For TPU evaluation, a eval (computation) step is first wrapped in a\n tf.while_loop control flow to repeat for many times and then replicated to\n all TPU shards. Besides the input and output are slightly different. Input,\n features and labels, should be taken from TPU infeed rather than input\n pipeline (input_fn) directly. Output is managed in two stages. First, the\n model outputs as the result of evaluation computation, usually model logits,\n should be transferred from TPU system to CPU. Then, all model outputs are\n concatenated first on CPU and sent to the metric_fn for metrics computation.\n To fit TPU evaluation pattern, the original eval computation should be\n reformed, which is the returned `eval_step`.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn\n representing the eval step for TPU.\n \"\"\"\n host_calls = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n captured_eval_hooks = _CapturedObject()\n\n def eval_step(total_loss, *cache):\n \"\"\"Evaluation step function for use inside a while loop.\"\"\"\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n\n # Consume the current cache\n tpu_estimator_spec = self._call_model_fn(features, labels, cache=cache)\n if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n raise RuntimeError(\n 'estimator_spec used by TPU evaluation must have type'\n '`TPUEstimatorSpec`. 
Got {}'.format(type(tpu_estimator_spec)))\n\n # Retrieve the new returned cache\n cache = tpu_estimator_spec.cache\n loss = tpu_estimator_spec.loss\n\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)\n\n to_record = {}\n if tpu_estimator_spec.eval_metrics:\n to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics\n if tpu_estimator_spec.host_call is not None:\n # We assume that evaluate won't update global step, so we don't wrap\n # this host_call.\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with ops.control_dependencies(host_calls.create_enqueue_op()):\n return [math_ops.add(total_loss, loss)] + cache\n\n return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks","function_tokens":["def","convert_to_single_tpu_eval_step","(","self",",","dequeue_fn",")",":","host_calls","=","_OutfeedHostCall","(","self",".","_ctx",")","captured_scaffold_fn","=","_CapturedObject","(",")","captured_eval_hooks","=","_CapturedObject","(",")","def","eval_step","(","total_loss",",","*","cache",")",":","\"\"\"Evaluation step function for use inside a while loop.\"\"\"","inputs","=","dequeue_fn","(",")","features",",","labels","=","inputs",".","features_and_labels","(",")","# Consume the current cache","tpu_estimator_spec","=","self",".","_call_model_fn","(","features",",","labels",",","cache","=","cache",")","if","not","isinstance","(","tpu_estimator_spec",",","model_fn_lib",".","_TPUEstimatorSpec",")",":","# pylint: disable=protected-access","raise","RuntimeError","(","'estimator_spec used by TPU evaluation must have type'","'`TPUEstimatorSpec`. Got {}'",".","format","(","type","(","tpu_estimator_spec",")",")",")","# Retrieve the new returned cache","cache","=","tpu_estimator_spec",".","cache","loss","=","tpu_estimator_spec",".","loss","captured_scaffold_fn",".","capture","(","tpu_estimator_spec",".","scaffold_fn",")","captured_eval_hooks",".","capture","(","tpu_estimator_spec",".","evaluation_hooks",")","to_record","=","{","}","if","tpu_estimator_spec",".","eval_metrics",":","to_record","[","'eval_metrics'","]","=","tpu_estimator_spec",".","eval_metrics","if","tpu_estimator_spec",".","host_call","is","not","None",":","# We assume that evaluate won't update global step, so we don't wrap","# this host_call.","to_record","[","'host_call'","]","=","tpu_estimator_spec",".","host_call","host_calls",".","record","(","to_record",")","with","ops",".","control_dependencies","(","host_calls",".","create_enqueue_op","(",")",")",":","return","[","math_ops",".","add","(","total_loss",",","loss",")","]","+","cache","return","eval_step",",","host_calls",",","captured_scaffold_fn",",","captured_eval_hooks"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1398-L1459"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_ModelFnWrapper.convert_to_single_tpu_predict_step","parameters":"(self, dequeue_fn)","argument_list":"","return_statement":"return (predict_step, host_calls, captured_scaffold_fn,\n captured_predict_hooks)","docstring":"Converts user provided model_fn` as a single predict step on TPU.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of predict_fn, host_calls, and captured 
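Sketch of the `eval_metrics` pair the eval path consumes: a `metric_fn` plus the tensors to outfeed to it; `_OutfeedHostCall.validate` (further down) checks that the tensor-list length matches the function's arity. `labels`, `logits`, and `loss` are assumed to come from the model.

import tensorflow as tf

def metric_fn(labels, logits):
  # Runs on the host over the concatenated, outfed model outputs.
  predictions = tf.argmax(logits, axis=-1)
  return {'accuracy': tf.metrics.accuracy(labels=labels,
                                          predictions=predictions)}

eval_spec = tf.contrib.tpu.TPUEstimatorSpec(
    mode=tf.estimator.ModeKeys.EVAL,
    loss=loss,
    eval_metrics=(metric_fn, [labels, logits]))  # two tensors for two args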
scaffold_fn. The\n predict_fn representing the predict step for TPU.","docstring_summary":"Converts user provided model_fn` as a single predict step on TPU.","docstring_tokens":["Converts","user","provided","model_fn","as","a","single","predict","step","on","TPU","."],"function":"def convert_to_single_tpu_predict_step(self, dequeue_fn):\n \"\"\"Converts user provided model_fn` as a single predict step on TPU.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of predict_fn, host_calls, and captured scaffold_fn. The\n predict_fn representing the predict step for TPU.\n \"\"\"\n host_calls = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n captured_predict_hooks = _CapturedObject()\n\n def predict_step(unused_scalar_stopping_signal):\n \"\"\"Evaluation step function for use inside a while loop.\"\"\"\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n stopping_signals = inputs.signals()\n\n assert stopping_signals is not None, (\n 'Internal Error: `signals` is missing.')\n\n tpu_estimator_spec = self._call_model_fn(\n features, labels, is_export_mode=False)\n if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n raise RuntimeError(\n 'estimator_spec used by TPU prediction must have type'\n '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))\n\n self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)\n\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)\n to_record = {}\n identity_fn = lambda **kwargs: kwargs\n to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]\n to_record['signals'] = [identity_fn, stopping_signals]\n if tpu_estimator_spec.host_call is not None:\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with ops.control_dependencies(host_calls.create_enqueue_op()):\n return _StopSignals.as_scalar_stopping_signal(stopping_signals)\n\n return (predict_step, host_calls, captured_scaffold_fn,\n captured_predict_hooks)","function_tokens":["def","convert_to_single_tpu_predict_step","(","self",",","dequeue_fn",")",":","host_calls","=","_OutfeedHostCall","(","self",".","_ctx",")","captured_scaffold_fn","=","_CapturedObject","(",")","captured_predict_hooks","=","_CapturedObject","(",")","def","predict_step","(","unused_scalar_stopping_signal",")",":","\"\"\"Evaluation step function for use inside a while loop.\"\"\"","inputs","=","dequeue_fn","(",")","features",",","labels","=","inputs",".","features_and_labels","(",")","stopping_signals","=","inputs",".","signals","(",")","assert","stopping_signals","is","not","None",",","(","'Internal Error: `signals` is missing.'",")","tpu_estimator_spec","=","self",".","_call_model_fn","(","features",",","labels",",","is_export_mode","=","False",")","if","not","isinstance","(","tpu_estimator_spec",",","model_fn_lib",".","_TPUEstimatorSpec",")",":","# pylint: disable=protected-access","raise","RuntimeError","(","'estimator_spec used by TPU prediction must have type'","'`TPUEstimatorSpec`. 
Got {}'",".","format","(","type","(","tpu_estimator_spec",")",")",")","self",".","_verify_tpu_spec_predictions","(","tpu_estimator_spec",".","predictions",")","captured_scaffold_fn",".","capture","(","tpu_estimator_spec",".","scaffold_fn",")","captured_predict_hooks",".","capture","(","tpu_estimator_spec",".","prediction_hooks",")","to_record","=","{","}","identity_fn","=","lambda","*","*","kwargs",":","kwargs","to_record","[","'predictions'","]","=","[","identity_fn",",","tpu_estimator_spec",".","predictions","]","to_record","[","'signals'","]","=","[","identity_fn",",","stopping_signals","]","if","tpu_estimator_spec",".","host_call","is","not","None",":","to_record","[","'host_call'","]","=","tpu_estimator_spec",".","host_call","host_calls",".","record","(","to_record",")","with","ops",".","control_dependencies","(","host_calls",".","create_enqueue_op","(",")",")",":","return","_StopSignals",".","as_scalar_stopping_signal","(","stopping_signals",")","return","(","predict_step",",","host_calls",",","captured_scaffold_fn",",","captured_predict_hooks",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1461-L1508"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_ModelFnWrapper._verify_tpu_spec_predictions","parameters":"(self, predictions)","argument_list":"","return_statement":"return predictions","docstring":"Validates TPUEstimatorSpec.predictions dict.","docstring_summary":"Validates TPUEstimatorSpec.predictions dict.","docstring_tokens":["Validates","TPUEstimatorSpec",".","predictions","dict","."],"function":"def _verify_tpu_spec_predictions(self, predictions):\n \"\"\"Validates TPUEstimatorSpec.predictions dict.\"\"\"\n # TODO(xiejw): Adds validation for prediction dictionrary.\n # TODO(xiejw): Adds support for single tensor as predictions.\n if not isinstance(predictions, dict):\n raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')\n\n for (key, tensor) in predictions.items():\n if tensor.shape.dims[0].value is None:\n raise ValueError(\n 'The tensor with key ({}) in TPUEstimatorSpec.predictions has '\n 'dynamic shape (should be static). Tensor: {}'.format(key, tensor))\n return predictions","function_tokens":["def","_verify_tpu_spec_predictions","(","self",",","predictions",")",":","# TODO(xiejw): Adds validation for prediction dictionrary.","# TODO(xiejw): Adds support for single tensor as predictions.","if","not","isinstance","(","predictions",",","dict",")",":","raise","TypeError","(","'TPUEstimatorSpec.predictions must be dict of Tensors.'",")","for","(","key",",","tensor",")","in","predictions",".","items","(",")",":","if","tensor",".","shape",".","dims","[","0","]",".","value","is","None",":","raise","ValueError","(","'The tensor with key ({}) in TPUEstimatorSpec.predictions has '","'dynamic shape (should be static). 
Tensor: {}'",".","format","(","key",",","tensor",")",")","return","predictions"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1510-L1522"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_ModelFnWrapper._validate_model_features_and_labels","parameters":"(self, features, labels,\n is_export_mode)","argument_list":"","return_statement":"","docstring":"Validates that the features and labels for the model function are valid.\n\n A valid features\/labels object is the one with:\n - Type: A tensor or any nested structure of tensors supported by TF nest,\n namely nested dictionary, tuple, namedtuple, or sequence of tensors.\n - Static shape if is_export_mode is False.\n\n Args:\n features: the features that would be input to the model function.\n labels: the labels that would be input to the model function.\n is_export_mode: boolean value specifying if in export mode.\n\n Raises:\n TypeError: If features\/labels are not of the correct type.\n ValueError: If features\/labels have dynamic shape.","docstring_summary":"Validates that the features and labels for the model function are valid.","docstring_tokens":["Validates","that","the","features","and","labels","for","the","model","function","are","valid","."],"function":"def _validate_model_features_and_labels(self, features, labels,\n is_export_mode):\n \"\"\"Validates that the features and labels for the model function are valid.\n\n A valid features\/labels object is the one with:\n - Type: A tensor or any nested structure of tensors supported by TF nest,\n namely nested dictionary, tuple, namedtuple, or sequence of tensors.\n - Static shape if is_export_mode is False.\n\n Args:\n features: the features that would be input to the model function.\n labels: the labels that would be input to the model function.\n is_export_mode: boolean value specifying if in export mode.\n\n Raises:\n TypeError: If features\/labels are not of the correct type.\n ValueError: If features\/labels have dynamic shape.\n \"\"\"\n\n def validate(obj, obj_name):\n \"\"\"Helper validate function.\"\"\"\n if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):\n return\n if isinstance(obj, ops.Tensor):\n if not obj.get_shape().is_fully_defined():\n raise ValueError(\n 'The {} to the model returned by input_fn must have static shape.'\n ' Tensor: {}'.format(obj_name, obj))\n else:\n for tensor in data_nest.flatten(obj):\n if not tensor.get_shape().is_fully_defined():\n raise ValueError(\n ('The {} to the model returned by input_fn must have static '\n 'shape. 
Tensor: {}').format(obj_name, tensor))\n\n validate(features, 'features')\n if labels is not None:\n validate(labels, 'labels')","function_tokens":["def","_validate_model_features_and_labels","(","self",",","features",",","labels",",","is_export_mode",")",":","def","validate","(","obj",",","obj_name",")",":","\"\"\"Helper validate function.\"\"\"","if","is_export_mode","or","self",".","_ctx",".","is_running_on_cpu","(","is_export_mode",")",":","return","if","isinstance","(","obj",",","ops",".","Tensor",")",":","if","not","obj",".","get_shape","(",")",".","is_fully_defined","(",")",":","raise","ValueError","(","'The {} to the model returned by input_fn must have static shape.'","' Tensor: {}'",".","format","(","obj_name",",","obj",")",")","else",":","for","tensor","in","data_nest",".","flatten","(","obj",")",":","if","not","tensor",".","get_shape","(",")",".","is_fully_defined","(",")",":","raise","ValueError","(","(","'The {} to the model returned by input_fn must have static '","'shape. Tensor: {}'",")",".","format","(","obj_name",",","tensor",")",")","validate","(","features",",","'features'",")","if","labels","is","not","None",":","validate","(","labels",",","'labels'",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1524-L1561"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_ModelFnWrapper._call_model_fn","parameters":"(self, features, labels, cache=None, is_export_mode=False)","argument_list":"","return_statement":"","docstring":"Calls the model_fn with required parameters.","docstring_summary":"Calls the model_fn with required parameters.","docstring_tokens":["Calls","the","model_fn","with","required","parameters","."],"function":"def _call_model_fn(self, features, labels, cache=None, is_export_mode=False):\n \"\"\"Calls the model_fn with required parameters.\"\"\"\n self._validate_model_features_and_labels(features, labels, is_export_mode)\n model_fn_args = function_utils.fn_args(self._model_fn)\n kwargs = {}\n\n # Makes deep copy with `config` and params` in case user mutates them.\n config = copy.deepcopy(self._config)\n params = copy.deepcopy(self._params)\n\n if 'labels' in model_fn_args:\n kwargs['labels'] = labels\n elif labels is not None:\n raise ValueError(\n 'model_fn does not take labels, but input_fn returns labels.')\n if 'mode' in model_fn_args:\n kwargs['mode'] = self._ctx.mode\n if 'config' in model_fn_args:\n kwargs['config'] = config\n if 'params' in model_fn_args:\n kwargs['params'] = params\n\n if cache is not None:\n params['cache'] = cache\n\n if 'params' not in model_fn_args:\n raise ValueError('model_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\\'batch_size\\']'.format(self._model_fn))\n\n if is_export_mode:\n batch_size_for_model_fn = None\n else:\n batch_size_for_model_fn = self._ctx.batch_size_for_model_fn\n\n if batch_size_for_model_fn is not None:\n _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)\n\n running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)\n _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)\n\n if not running_on_cpu:\n user_context = tpu_context.TPUContext(\n internal_ctx=self._ctx, call_from_input_fn=False)\n _add_item_to_params(params, _CTX_KEY, user_context)\n\n estimator_spec = self._model_fn(features=features, **kwargs)\n 
if (running_on_cpu and\n isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access\n # The estimator_spec will be passed to `Estimator` directly, which expects\n # type `EstimatorSpec`.\n return estimator_spec.as_estimator_spec()\n else:\n return estimator_spec","function_tokens":["def","_call_model_fn","(","self",",","features",",","labels",",","cache","=","None",",","is_export_mode","=","False",")",":","self",".","_validate_model_features_and_labels","(","features",",","labels",",","is_export_mode",")","model_fn_args","=","function_utils",".","fn_args","(","self",".","_model_fn",")","kwargs","=","{","}","# Makes deep copy with `config` and params` in case user mutates them.","config","=","copy",".","deepcopy","(","self",".","_config",")","params","=","copy",".","deepcopy","(","self",".","_params",")","if","'labels'","in","model_fn_args",":","kwargs","[","'labels'","]","=","labels","elif","labels","is","not","None",":","raise","ValueError","(","'model_fn does not take labels, but input_fn returns labels.'",")","if","'mode'","in","model_fn_args",":","kwargs","[","'mode'","]","=","self",".","_ctx",".","mode","if","'config'","in","model_fn_args",":","kwargs","[","'config'","]","=","config","if","'params'","in","model_fn_args",":","kwargs","[","'params'","]","=","params","if","cache","is","not","None",":","params","[","'cache'","]","=","cache","if","'params'","not","in","model_fn_args",":","raise","ValueError","(","'model_fn ({}) does not include params argument, '","'required by TPUEstimator to pass batch size as '","'params[\\'batch_size\\']'",".","format","(","self",".","_model_fn",")",")","if","is_export_mode",":","batch_size_for_model_fn","=","None","else",":","batch_size_for_model_fn","=","self",".","_ctx",".","batch_size_for_model_fn","if","batch_size_for_model_fn","is","not","None",":","_add_item_to_params","(","params",",","_BATCH_SIZE_KEY",",","batch_size_for_model_fn",")","running_on_cpu","=","self",".","_ctx",".","is_running_on_cpu","(","is_export_mode",")","_add_item_to_params","(","params",",","_USE_TPU_KEY",",","not","running_on_cpu",")","if","not","running_on_cpu",":","user_context","=","tpu_context",".","TPUContext","(","internal_ctx","=","self",".","_ctx",",","call_from_input_fn","=","False",")","_add_item_to_params","(","params",",","_CTX_KEY",",","user_context",")","estimator_spec","=","self",".","_model_fn","(","features","=","features",",","*","*","kwargs",")","if","(","running_on_cpu","and","isinstance","(","estimator_spec",",","model_fn_lib",".","_TPUEstimatorSpec",")",")",":","# pylint: disable=protected-access","# The estimator_spec will be passed to `Estimator` directly, which expects","# type `EstimatorSpec`.","return","estimator_spec",".","as_estimator_spec","(",")","else",":","return","estimator_spec"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1563-L1616"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_ModelFnWrapper._verify_estimator_spec","parameters":"(self, estimator_spec)","argument_list":"","return_statement":"return estimator_spec","docstring":"Validates the estimator_spec.","docstring_summary":"Validates the estimator_spec.","docstring_tokens":["Validates","the","estimator_spec","."],"function":"def _verify_estimator_spec(self, estimator_spec):\n \"\"\"Validates the estimator_spec.\"\"\"\n if 
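A plain-Python mirror of the keyword dispatch `_call_model_fn` performs via `function_utils.fn_args`: only the keywords the user's `model_fn` declares are passed through, and feeding labels to a `model_fn` that does not declare them is an error. This stand-alone version uses `inspect` instead of the internal helper.

import inspect

def call_with_declared_kwargs(model_fn, features, labels, mode, config, params):
  fn_args = inspect.signature(model_fn).parameters
  kwargs = {}
  if 'labels' in fn_args:
    kwargs['labels'] = labels
  elif labels is not None:
    raise ValueError('model_fn does not take labels, but input_fn returns labels.')
  for name, value in (('mode', mode), ('config', config), ('params', params)):
    if name in fn_args:
      kwargs[name] = value
  return model_fn(features=features, **kwargs)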
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n return estimator_spec\n\n err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'\n if estimator_spec.training_chief_hooks:\n raise ValueError(\n err_msg.format('training_chief_hooks') + 'If you want' +\n ' to pass training hooks, please pass via training_hooks.')\n\n if estimator_spec.scaffold:\n logging.warning('EstimatorSpec.Scaffold is ignored by TPU train\/eval. '\n 'Please use TPUEstimatorSpec.')\n return estimator_spec","function_tokens":["def","_verify_estimator_spec","(","self",",","estimator_spec",")",":","if","isinstance","(","estimator_spec",",","model_fn_lib",".","_TPUEstimatorSpec",")",":","# pylint: disable=protected-access","return","estimator_spec","err_msg","=","'{} returned by EstimatorSpec is not supported in TPUEstimator.'","if","estimator_spec",".","training_chief_hooks",":","raise","ValueError","(","err_msg",".","format","(","'training_chief_hooks'",")","+","'If you want'","+","' to pass training hooks, please pass via training_hooks.'",")","if","estimator_spec",".","scaffold",":","logging",".","warning","(","'EstimatorSpec.Scaffold is ignored by TPU train\/eval. '","'Please use TPUEstimatorSpec.'",")","return","estimator_spec"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1618-L1632"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_OutfeedHostCall.validate","parameters":"(host_calls)","argument_list":"","return_statement":"","docstring":"Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.","docstring_summary":"Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.","docstring_tokens":["Validates","the","eval_metrics","and","host_call","in","TPUEstimatorSpec","."],"function":"def validate(host_calls):\n \"\"\"Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.\"\"\"\n\n for name, host_call in host_calls.items():\n if not isinstance(host_call, (tuple, list)):\n raise ValueError('{} should be tuple or list'.format(name))\n if len(host_call) != 2:\n raise ValueError('{} should have two elements.'.format(name))\n if not callable(host_call[0]):\n raise TypeError('{}[0] should be callable.'.format(name))\n if not isinstance(host_call[1], (tuple, list, dict)):\n raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))\n\n if isinstance(host_call[1], (tuple, list)):\n fullargspec = tf_inspect.getfullargspec(host_call[0])\n fn_args = function_utils.fn_args(host_call[0])\n # wrapped_hostcall_with_global_step uses varargs, so we allow that.\n if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):\n raise RuntimeError(\n 'In TPUEstimatorSpec.{}, length of tensors {} does not match '\n 'method args of the function, which takes {}.'.format(\n name, len(host_call[1]), len(fn_args)))","function_tokens":["def","validate","(","host_calls",")",":","for","name",",","host_call","in","host_calls",".","items","(",")",":","if","not","isinstance","(","host_call",",","(","tuple",",","list",")",")",":","raise","ValueError","(","'{} should be tuple or list'",".","format","(","name",")",")","if","len","(","host_call",")","!=","2",":","raise","ValueError","(","'{} should have two 
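`_verify_estimator_spec` above warns that `EstimatorSpec.scaffold` is ignored on TPU; the supported route is `TPUEstimatorSpec.scaffold_fn`, which `as_estimator_spec` calls lazily on the CPU side. A sketch, with `loss` and `train_op` assumed to be built elsewhere:

import tensorflow as tf

def scaffold_fn():
  # Called after graph construction; returns the Scaffold to use.
  return tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=3))

spec = tf.contrib.tpu.TPUEstimatorSpec(
    mode=tf.estimator.ModeKeys.TRAIN,
    loss=loss, train_op=train_op,
    scaffold_fn=scaffold_fn)
# Supplying EstimatorSpec.training_chief_hooks instead raises the ValueError above.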
elements.'",".","format","(","name",")",")","if","not","callable","(","host_call","[","0","]",")",":","raise","TypeError","(","'{}[0] should be callable.'",".","format","(","name",")",")","if","not","isinstance","(","host_call","[","1","]",",","(","tuple",",","list",",","dict",")",")",":","raise","ValueError","(","'{}[1] should be tuple or list, or dict.'",".","format","(","name",")",")","if","isinstance","(","host_call","[","1","]",",","(","tuple",",","list",")",")",":","fullargspec","=","tf_inspect",".","getfullargspec","(","host_call","[","0","]",")","fn_args","=","function_utils",".","fn_args","(","host_call","[","0","]",")","# wrapped_hostcall_with_global_step uses varargs, so we allow that.","if","fullargspec",".","varargs","is","None","and","len","(","host_call","[","1","]",")","!=","len","(","fn_args",")",":","raise","RuntimeError","(","'In TPUEstimatorSpec.{}, length of tensors {} does not match '","'method args of the function, which takes {}.'",".","format","(","name",",","len","(","host_call","[","1","]",")",",","len","(","fn_args",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1649-L1670"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_OutfeedHostCall.create_cpu_hostcall","parameters":"(host_calls)","argument_list":"","return_statement":"return ret","docstring":"Runs on the host_call on CPU instead of TPU when use_tpu=False.","docstring_summary":"Runs on the host_call on CPU instead of TPU when use_tpu=False.","docstring_tokens":["Runs","on","the","host_call","on","CPU","instead","of","TPU","when","use_tpu","=","False","."],"function":"def create_cpu_hostcall(host_calls):\n \"\"\"Runs on the host_call on CPU instead of TPU when use_tpu=False.\"\"\"\n\n _OutfeedHostCall.validate(host_calls)\n ret = {}\n for name, host_call in host_calls.items():\n host_fn, tensors = host_call\n if isinstance(tensors, (tuple, list)):\n ret[name] = host_fn(*tensors)\n else:\n # Must be dict.\n try:\n ret[name] = host_fn(**tensors)\n except TypeError as e:\n logging.warning(\n 'Exception while calling %s: %s. It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise e\n return ret","function_tokens":["def","create_cpu_hostcall","(","host_calls",")",":","_OutfeedHostCall",".","validate","(","host_calls",")","ret","=","{","}","for","name",",","host_call","in","host_calls",".","items","(",")",":","host_fn",",","tensors","=","host_call","if","isinstance","(","tensors",",","(","tuple",",","list",")",")",":","ret","[","name","]","=","host_fn","(","*","tensors",")","else",":","# Must be dict.","try",":","ret","[","name","]","=","host_fn","(","*","*","tensors",")","except","TypeError","as","e",":","logging",".","warning","(","'Exception while calling %s: %s. 
It is likely the tensors '","'(%s[1]) do not match the '","'function\\'s arguments'",",","name",",","e",",","name",")","raise","e","return","ret"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1673-L1692"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_OutfeedHostCall.record","parameters":"(self, host_calls)","argument_list":"","return_statement":"","docstring":"Records the host_call structure.","docstring_summary":"Records the host_call structure.","docstring_tokens":["Records","the","host_call","structure","."],"function":"def record(self, host_calls):\n \"\"\"Records the host_call structure.\"\"\"\n\n for name, host_call in host_calls.items():\n host_fn, tensor_list_or_dict = host_call\n self._names.append(name)\n self._host_fns[name] = host_fn\n\n if isinstance(tensor_list_or_dict, dict):\n for (key, tensor) in six.iteritems(tensor_list_or_dict):\n self._tensor_keys[name].append(key)\n self._tensors[name].append(tensor)\n self._tensor_dtypes[name].append(tensor.dtype)\n self._tensor_shapes[name].append(tensor.shape)\n else:\n # List or tuple.\n self._tensor_keys[name] = None\n for tensor in tensor_list_or_dict:\n self._tensors[name].append(tensor)\n self._tensor_dtypes[name].append(tensor.dtype)\n self._tensor_shapes[name].append(tensor.shape)","function_tokens":["def","record","(","self",",","host_calls",")",":","for","name",",","host_call","in","host_calls",".","items","(",")",":","host_fn",",","tensor_list_or_dict","=","host_call","self",".","_names",".","append","(","name",")","self",".","_host_fns","[","name","]","=","host_fn","if","isinstance","(","tensor_list_or_dict",",","dict",")",":","for","(","key",",","tensor",")","in","six",".","iteritems","(","tensor_list_or_dict",")",":","self",".","_tensor_keys","[","name","]",".","append","(","key",")","self",".","_tensors","[","name","]",".","append","(","tensor",")","self",".","_tensor_dtypes","[","name","]",".","append","(","tensor",".","dtype",")","self",".","_tensor_shapes","[","name","]",".","append","(","tensor",".","shape",")","else",":","# List or tuple.","self",".","_tensor_keys","[","name","]","=","None","for","tensor","in","tensor_list_or_dict",":","self",".","_tensors","[","name","]",".","append","(","tensor",")","self",".","_tensor_dtypes","[","name","]",".","append","(","tensor",".","dtype",")","self",".","_tensor_shapes","[","name","]",".","append","(","tensor",".","shape",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1694-L1714"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_OutfeedHostCall.create_enqueue_op","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Create the op to enqueue the recorded host_calls.\n\n Returns:\n A list of enqueue ops, which is empty if there are no host calls.","docstring_summary":"Create the op to enqueue the recorded host_calls.","docstring_tokens":["Create","the","op","to","enqueue","the","recorded","host_calls","."],"function":"def create_enqueue_op(self):\n \"\"\"Create the op to enqueue the recorded host_calls.\n\n Returns:\n A list of enqueue ops, which is empty if there are no host calls.\n \"\"\"\n if not self._names:\n return 
[]\n\n tensors = []\n # TODO(jhseu): Consider deduping tensors.\n for name in self._names:\n tensors.extend(self._tensors[name])\n\n with ops.device(tpu.core(0)):\n return [tpu_ops.outfeed_enqueue_tuple(tensors)]","function_tokens":["def","create_enqueue_op","(","self",")",":","if","not","self",".","_names",":","return","[","]","tensors","=","[","]","# TODO(jhseu): Consider deduping tensors.","for","name","in","self",".","_names",":","tensors",".","extend","(","self",".","_tensors","[","name","]",")","with","ops",".","device","(","tpu",".","core","(","0",")",")",":","return","[","tpu_ops",".","outfeed_enqueue_tuple","(","tensors",")","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1716-L1731"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_OutfeedHostCall.create_tpu_hostcall","parameters":"(self)","argument_list":"","return_statement":"return ret","docstring":"Sends the tensors through outfeed and runs the host_fn on CPU.\n\n The tensors are concatenated along dimension 0 to form a global tensor\n across all shards. The concatenated function is passed to the host_fn and\n executed on the first host.\n\n Returns:\n A dictionary mapping name to the return type of the host_call by that\n name.\n\n Raises:\n RuntimeError: If outfeed tensor is scalar.","docstring_summary":"Sends the tensors through outfeed and runs the host_fn on CPU.","docstring_tokens":["Sends","the","tensors","through","outfeed","and","runs","the","host_fn","on","CPU","."],"function":"def create_tpu_hostcall(self):\n \"\"\"Sends the tensors through outfeed and runs the host_fn on CPU.\n\n The tensors are concatenated along dimension 0 to form a global tensor\n across all shards. The concatenated function is passed to the host_fn and\n executed on the first host.\n\n Returns:\n A dictionary mapping name to the return type of the host_call by that\n name.\n\n Raises:\n RuntimeError: If outfeed tensor is scalar.\n \"\"\"\n if not self._names:\n return {}\n\n ret = {}\n # For each i, dequeue_ops[i] is a list containing the tensors from all\n # shards. This list is concatenated later.\n dequeue_ops = []\n tensor_dtypes = []\n tensor_shapes = []\n for name in self._names:\n for _ in self._tensors[name]:\n dequeue_ops.append([])\n for dtype in self._tensor_dtypes[name]:\n tensor_dtypes.append(dtype)\n for shape in self._tensor_shapes[name]:\n tensor_shapes.append(shape)\n\n # Outfeed ops execute on each replica's first logical core. Note: we must\n # constraint it such that we have at most one outfeed dequeue and enqueue\n # per replica.\n for i in xrange(self._ctx.num_replicas):\n host_device, ordinal_id = self._ctx.device_for_replica(i)\n with ops.device(host_device):\n outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(\n dtypes=tensor_dtypes,\n shapes=tensor_shapes,\n device_ordinal=ordinal_id)\n for j, item in enumerate(outfeed_tensors):\n dequeue_ops[j].append(item)\n\n # Deconstruct dequeue ops.\n dequeue_ops_by_name = {}\n pos = 0\n for name in self._names:\n dequeue_ops_by_name[name] = dequeue_ops[pos:pos +\n len(self._tensors[name])]\n pos += len(self._tensors[name])\n\n # It is assumed evaluation always happens on single host TPU system. 
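The `create_enqueue_op` record above sends every recorded host_call tensor through a single outfeed tuple, in recorded-name order; the dequeue side relies on that same order to slice tensors back per name. A trivial sketch of the flattening step (pure Python; the real code wraps the result in tpu_ops.outfeed_enqueue_tuple on core 0):

def flatten_for_outfeed(names, tensors_by_name):
    # All host_call tensors are concatenated into one flat list, name by name.
    flat = []
    for name in names:
        flat.extend(tensors_by_name[name])
    return flat

names = ['eval_metrics', 'host_call']
print(flatten_for_outfeed(
    names, {'eval_metrics': ['acc', 'count'], 'host_call': ['loss']}))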
So,\n # place all ops on tpu host if possible.\n #\n # TODO(jhseu): Evaluate whether this is right for summaries.\n with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):\n for name in self._names:\n dequeue_ops = dequeue_ops_by_name[name]\n for i, item in enumerate(dequeue_ops):\n if dequeue_ops[i][0].shape.ndims == 0:\n raise RuntimeError(\n 'All tensors outfed from TPU should preserve batch size '\n 'dimension, but got scalar {}'.format(dequeue_ops[i][0]))\n # TODO(xiejw): Allow users to specify the axis for batch size\n # dimension.\n dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)\n\n if self._tensor_keys[name] is not None:\n # The user-provided eval_metrics[1] is a dict.\n dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))\n try:\n ret[name] = self._host_fns[name](**dequeue_ops)\n except TypeError as e:\n logging.warning(\n 'Exception while calling %s: %s. It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise e\n else:\n ret[name] = self._host_fns[name](*dequeue_ops)\n\n return ret","function_tokens":["def","create_tpu_hostcall","(","self",")",":","if","not","self",".","_names",":","return","{","}","ret","=","{","}","# For each i, dequeue_ops[i] is a list containing the tensors from all","# shards. This list is concatenated later.","dequeue_ops","=","[","]","tensor_dtypes","=","[","]","tensor_shapes","=","[","]","for","name","in","self",".","_names",":","for","_","in","self",".","_tensors","[","name","]",":","dequeue_ops",".","append","(","[","]",")","for","dtype","in","self",".","_tensor_dtypes","[","name","]",":","tensor_dtypes",".","append","(","dtype",")","for","shape","in","self",".","_tensor_shapes","[","name","]",":","tensor_shapes",".","append","(","shape",")","# Outfeed ops execute on each replica's first logical core. Note: we must","# constraint it such that we have at most one outfeed dequeue and enqueue","# per replica.","for","i","in","xrange","(","self",".","_ctx",".","num_replicas",")",":","host_device",",","ordinal_id","=","self",".","_ctx",".","device_for_replica","(","i",")","with","ops",".","device","(","host_device",")",":","outfeed_tensors","=","tpu_ops",".","outfeed_dequeue_tuple","(","dtypes","=","tensor_dtypes",",","shapes","=","tensor_shapes",",","device_ordinal","=","ordinal_id",")","for","j",",","item","in","enumerate","(","outfeed_tensors",")",":","dequeue_ops","[","j","]",".","append","(","item",")","# Deconstruct dequeue ops.","dequeue_ops_by_name","=","{","}","pos","=","0","for","name","in","self",".","_names",":","dequeue_ops_by_name","[","name","]","=","dequeue_ops","[","pos",":","pos","+","len","(","self",".","_tensors","[","name","]",")","]","pos","+=","len","(","self",".","_tensors","[","name","]",")","# It is assumed evaluation always happens on single host TPU system. 
So,","# place all ops on tpu host if possible.","#","# TODO(jhseu): Evaluate whether this is right for summaries.","with","ops",".","device","(","self",".","_ctx",".","tpu_host_placement_function","(","replica_id","=","0",")",")",":","for","name","in","self",".","_names",":","dequeue_ops","=","dequeue_ops_by_name","[","name","]","for","i",",","item","in","enumerate","(","dequeue_ops",")",":","if","dequeue_ops","[","i","]","[","0","]",".","shape",".","ndims","==","0",":","raise","RuntimeError","(","'All tensors outfed from TPU should preserve batch size '","'dimension, but got scalar {}'",".","format","(","dequeue_ops","[","i","]","[","0","]",")",")","# TODO(xiejw): Allow users to specify the axis for batch size","# dimension.","dequeue_ops","[","i","]","=","array_ops",".","concat","(","dequeue_ops","[","i","]",",","axis","=","0",")","if","self",".","_tensor_keys","[","name","]","is","not","None",":","# The user-provided eval_metrics[1] is a dict.","dequeue_ops","=","dict","(","zip","(","self",".","_tensor_keys","[","name","]",",","dequeue_ops",")",")","try",":","ret","[","name","]","=","self",".","_host_fns","[","name","]","(","*","*","dequeue_ops",")","except","TypeError","as","e",":","logging",".","warning","(","'Exception while calling %s: %s. It is likely the tensors '","'(%s[1]) do not match the '","'function\\'s arguments'",",","name",",","e",",","name",")","raise","e","else",":","ret","[","name","]","=","self",".","_host_fns","[","name","]","(","*","dequeue_ops",")","return","ret"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L1733-L1815"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"TPUEstimator.__init__","parameters":"(self,\n model_fn=None,\n train_cache_fn=None,\n eval_cache_fn=None,\n model_dir=None,\n config=None,\n params=None,\n use_tpu=True,\n train_batch_size=None,\n eval_batch_size=None,\n predict_batch_size=None,\n batch_axis=None,\n eval_on_tpu=True,\n export_to_tpu=True,\n warm_start_from=None)","argument_list":"","return_statement":"","docstring":"Constructs an `TPUEstimator` instance.\n\n Args:\n model_fn: Model function as required by `Estimator` which returns\n EstimatorSpec or TPUEstimatorSpec. `training_hooks`, 'evaluation_hooks',\n and `prediction_hooks` must not capure any TPU Tensor inside the\n model_fn.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model. If `None`, the model_dir in\n `config` will be used if set. If both are set, they must be same. If\n both are `None`, a temporary directory will be used.\n config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.\n params: An optional `dict` of hyper parameters that will be passed into\n `input_fn` and `model_fn`. Keys are names of parameters, values are\n basic python types. There are reserved keys for `TPUEstimator`,\n including 'batch_size'.\n use_tpu: A bool indicating whether TPU support is enabled. Currently, -\n TPU training and evaluation respect this bit, but eval_on_tpu can\n override execution of eval. See below. 
- Predict still happens on CPU.\n train_batch_size: An int representing the global training batch size.\n TPUEstimator transforms this global batch size to a per-shard batch\n size, as params['batch_size'], when calling `input_fn` and `model_fn`.\n Cannot be `None` if `use_tpu` is `True`. Must be divisible by total\n number of replicas.\n eval_batch_size: An int representing evaluation batch size. Must be\n divisible by total number of replicas.\n predict_batch_size: An int representing the prediction batch size. Must be\n divisible by total number of replicas.\n batch_axis: A python tuple of int values describing how each tensor\n produced by the Estimator `input_fn` should be split across the TPU\n compute shards. For example, if your input_fn produced (images, labels)\n where the images tensor is in `HWCN` format, your shard dimensions would\n be [3, 0], where 3 corresponds to the `N` dimension of your images\n Tensor, and 0 corresponds to the dimension along which to split the\n labels to match up with the corresponding images. If None is supplied,\n and per_host_input_for_training is True, batches will be sharded based\n on the major dimension. If tpu_config.per_host_input_for_training is\n False or `PER_HOST_V2`, batch_axis is ignored.\n eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the\n model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.\n export_to_tpu: If True, `export_savedmodel()` exports a metagraph for\n serving on TPU besides the one on CPU.\n warm_start_from: Optional string filepath to a checkpoint or SavedModel to\n warm-start from, or a `tf.estimator.WarmStartSettings` object to fully\n configure warm-starting. If the string filepath is provided instead of\n a `WarmStartSettings`, then all variables are warm-started, and it is\n assumed that vocabularies and Tensor names are unchanged.\n\n Raises:\n ValueError: `params` has reserved keys already.","docstring_summary":"Constructs an `TPUEstimator` instance.","docstring_tokens":["Constructs","an","TPUEstimator","instance","."],"function":"def __init__(self,\n model_fn=None,\n train_cache_fn=None,\n eval_cache_fn=None,\n model_dir=None,\n config=None,\n params=None,\n use_tpu=True,\n train_batch_size=None,\n eval_batch_size=None,\n predict_batch_size=None,\n batch_axis=None,\n eval_on_tpu=True,\n export_to_tpu=True,\n warm_start_from=None):\n \"\"\"Constructs an `TPUEstimator` instance.\n\n Args:\n model_fn: Model function as required by `Estimator` which returns\n EstimatorSpec or TPUEstimatorSpec. `training_hooks`, 'evaluation_hooks',\n and `prediction_hooks` must not capure any TPU Tensor inside the\n model_fn.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model. If `None`, the model_dir in\n `config` will be used if set. If both are set, they must be same. If\n both are `None`, a temporary directory will be used.\n config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.\n params: An optional `dict` of hyper parameters that will be passed into\n `input_fn` and `model_fn`. Keys are names of parameters, values are\n basic python types. There are reserved keys for `TPUEstimator`,\n including 'batch_size'.\n use_tpu: A bool indicating whether TPU support is enabled. Currently, -\n TPU training and evaluation respect this bit, but eval_on_tpu can\n override execution of eval. See below. 
- Predict still happens on CPU.\n train_batch_size: An int representing the global training batch size.\n TPUEstimator transforms this global batch size to a per-shard batch\n size, as params['batch_size'], when calling `input_fn` and `model_fn`.\n Cannot be `None` if `use_tpu` is `True`. Must be divisible by total\n number of replicas.\n eval_batch_size: An int representing evaluation batch size. Must be\n divisible by total number of replicas.\n predict_batch_size: An int representing the prediction batch size. Must be\n divisible by total number of replicas.\n batch_axis: A python tuple of int values describing how each tensor\n produced by the Estimator `input_fn` should be split across the TPU\n compute shards. For example, if your input_fn produced (images, labels)\n where the images tensor is in `HWCN` format, your shard dimensions would\n be [3, 0], where 3 corresponds to the `N` dimension of your images\n Tensor, and 0 corresponds to the dimension along which to split the\n labels to match up with the corresponding images. If None is supplied,\n and per_host_input_for_training is True, batches will be sharded based\n on the major dimension. If tpu_config.per_host_input_for_training is\n False or `PER_HOST_V2`, batch_axis is ignored.\n eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the\n model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.\n export_to_tpu: If True, `export_savedmodel()` exports a metagraph for\n serving on TPU besides the one on CPU.\n warm_start_from: Optional string filepath to a checkpoint or SavedModel to\n warm-start from, or a `tf.estimator.WarmStartSettings` object to fully\n configure warm-starting. If the string filepath is provided instead of\n a `WarmStartSettings`, then all variables are warm-started, and it is\n assumed that vocabularies and Tensor names are unchanged.\n\n Raises:\n ValueError: `params` has reserved keys already.\n \"\"\"\n if config is None or not isinstance(config, tpu_config.RunConfig):\n raise ValueError(\n '`config` must be provided with type `tpu_config.RunConfig`')\n\n if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):\n raise ValueError('{} are reserved keys but existed in params {}.'.format(\n _RESERVED_PARAMS_KEYS, params))\n\n if use_tpu:\n # Perform some very basic validations. More validations will be found in\n # _InternalTPUContext.\n if train_batch_size is None:\n raise ValueError('`train_batch_size` cannot be `None`')\n util_lib.check_positive_integer(train_batch_size, 'train_batch_size')\n\n if (config.tpu_config.per_host_input_for_training is\n tpu_config.InputPipelineConfig.PER_SHARD_V1 and\n config.tpu_config.num_cores_per_replica):\n raise ValueError(\n 'Model parallelism only supports per host input for training. 
'\n 'Please adjust TPURunconfig.per_host_input_for_training.')\n\n if eval_batch_size is not None:\n util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')\n\n if predict_batch_size is not None:\n util_lib.check_positive_integer(predict_batch_size,\n 'predict_batch_size')\n\n # Verifies the model_fn signature according to Estimator framework.\n estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access\n # We cannot store config and params in this constructor as parent\n # constructor might change them, such as assigning a temp dir for\n # config.model_dir.\n model_function = self._augment_model_fn(\n model_fn,\n train_cache_fn,\n eval_cache_fn,\n batch_axis)\n\n # Overwrite log_step_count_steps to disable TensorLoggingHook and\n # StepCounterHook from being created in Estimator. TPUEstimator already\n # added equivalent hooks in _augment_model_fn above.\n self._log_every_n_steps = config.log_step_count_steps\n config = config.replace(log_step_count_steps=None)\n\n # Passing non-None params as wrapped model_fn has it.\n params = params or {}\n super(TPUEstimator, self).__init__(\n model_fn=model_function,\n model_dir=model_dir,\n config=config,\n params=params,\n warm_start_from=warm_start_from)\n self._iterations_per_training_loop = (\n self._config.tpu_config.iterations_per_loop)\n\n # All properties passed to _InternalTPUContext are immutable.\n # pylint: disable=protected-access\n self._ctx = tpu_context._get_tpu_context(\n self._config, train_batch_size, eval_batch_size, predict_batch_size,\n use_tpu, eval_on_tpu)\n\n self._export_to_tpu = export_to_tpu\n\n self._is_input_fn_invoked = None\n self._rendezvous = {}","function_tokens":["def","__init__","(","self",",","model_fn","=","None",",","train_cache_fn","=","None",",","eval_cache_fn","=","None",",","model_dir","=","None",",","config","=","None",",","params","=","None",",","use_tpu","=","True",",","train_batch_size","=","None",",","eval_batch_size","=","None",",","predict_batch_size","=","None",",","batch_axis","=","None",",","eval_on_tpu","=","True",",","export_to_tpu","=","True",",","warm_start_from","=","None",")",":","if","config","is","None","or","not","isinstance","(","config",",","tpu_config",".","RunConfig",")",":","raise","ValueError","(","'`config` must be provided with type `tpu_config.RunConfig`'",")","if","params","is","not","None","and","any","(","k","in","params","for","k","in","_RESERVED_PARAMS_KEYS",")",":","raise","ValueError","(","'{} are reserved keys but existed in params {}.'",".","format","(","_RESERVED_PARAMS_KEYS",",","params",")",")","if","use_tpu",":","# Perform some very basic validations. More validations will be found in","# _InternalTPUContext.","if","train_batch_size","is","None",":","raise","ValueError","(","'`train_batch_size` cannot be `None`'",")","util_lib",".","check_positive_integer","(","train_batch_size",",","'train_batch_size'",")","if","(","config",".","tpu_config",".","per_host_input_for_training","is","tpu_config",".","InputPipelineConfig",".","PER_SHARD_V1","and","config",".","tpu_config",".","num_cores_per_replica",")",":","raise","ValueError","(","'Model parallelism only supports per host input for training. 
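The constructor docstring above states that train/eval/predict batch sizes are global and must divide evenly across replicas; TPUEstimator then hands each shard its slice as params['batch_size']. A tiny sketch of that arithmetic (the function name is illustrative, not from the source):

def per_shard_batch_size(global_batch_size, num_replicas):
    # Global -> per-shard split described above; uneven splits are rejected.
    if global_batch_size % num_replicas:
        raise ValueError('batch size must be divisible by the replica count')
    return global_batch_size // num_replicas

# What model_fn then sees in params['batch_size']:
assert per_shard_batch_size(1024, 8) == 128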
'","'Please adjust TPURunconfig.per_host_input_for_training.'",")","if","eval_batch_size","is","not","None",":","util_lib",".","check_positive_integer","(","eval_batch_size",",","'eval_batch_size'",")","if","predict_batch_size","is","not","None",":","util_lib",".","check_positive_integer","(","predict_batch_size",",","'predict_batch_size'",")","# Verifies the model_fn signature according to Estimator framework.","estimator_lib",".","_verify_model_fn_args","(","model_fn",",","params",")","# pylint: disable=protected-access","# We cannot store config and params in this constructor as parent","# constructor might change them, such as assigning a temp dir for","# config.model_dir.","model_function","=","self",".","_augment_model_fn","(","model_fn",",","train_cache_fn",",","eval_cache_fn",",","batch_axis",")","# Overwrite log_step_count_steps to disable TensorLoggingHook and","# StepCounterHook from being created in Estimator. TPUEstimator already","# added equivalent hooks in _augment_model_fn above.","self",".","_log_every_n_steps","=","config",".","log_step_count_steps","config","=","config",".","replace","(","log_step_count_steps","=","None",")","# Passing non-None params as wrapped model_fn has it.","params","=","params","or","{","}","super","(","TPUEstimator",",","self",")",".","__init__","(","model_fn","=","model_function",",","model_dir","=","model_dir",",","config","=","config",",","params","=","params",",","warm_start_from","=","warm_start_from",")","self",".","_iterations_per_training_loop","=","(","self",".","_config",".","tpu_config",".","iterations_per_loop",")","# All properties passed to _InternalTPUContext are immutable.","# pylint: disable=protected-access","self",".","_ctx","=","tpu_context",".","_get_tpu_context","(","self",".","_config",",","train_batch_size",",","eval_batch_size",",","predict_batch_size",",","use_tpu",",","eval_on_tpu",")","self",".","_export_to_tpu","=","export_to_tpu","self",".","_is_input_fn_invoked","=","None","self",".","_rendezvous","=","{","}"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2094-L2227"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"TPUEstimator._call_model_fn_for_inference","parameters":"(self, features, labels, mode, config)","argument_list":"","return_statement":"return estimator_spec._replace(export_outputs=new_export_outputs)","docstring":"Wraps `_call_model_fn` for `export_savedmodel`.","docstring_summary":"Wraps `_call_model_fn` for `export_savedmodel`.","docstring_tokens":["Wraps","_call_model_fn","for","export_savedmodel","."],"function":"def _call_model_fn_for_inference(self, features, labels, mode, config):\n \"\"\"Wraps `_call_model_fn` for `export_savedmodel`.\"\"\"\n if mode != _REWRITE_FOR_INFERENCE_MODE:\n raise ValueError('mode must be {}; '\n 'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))\n\n capture = _CapturedObject()\n\n def computation():\n \"\"\"Compute tpu tensors used in export_outputs.\n\n Passed to rewrite_for_inference so that model_fn will be called under\n the rewriting contexts. 
Only tpu tensors are returned, but export_outputs\n and scaffold are captured.\n\n Returns:\n A list of Tensors used in export_outputs and not marked for\n outside_compilation.\n \"\"\"\n # We should only call model fn once and it should be inside `computation`\n # so that building the graph will happen under `rewrite_for_inference`.\n mode = model_fn_lib.ModeKeys.PREDICT\n estimator_spec = self._call_model_fn(features, labels, mode, config)\n\n # We pick the TPU tensors out from `export_output` and later return them\n # from `computation` for rewriting.\n tensors_dict = collections.OrderedDict(\n (k, _export_output_to_tensors(v))\n for k, v in six.iteritems(estimator_spec.export_outputs))\n tensors = nest.flatten(tensors_dict)\n tpu_tensors = [t for t in tensors if t is not None]\n\n # We cannot return anything other than `tpu_tensors` here so we capture\n # the rest for later use.\n capture.capture((estimator_spec, tensors_dict, tensors))\n return tpu_tensors\n\n tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation)\n estimator_spec, tensors_dict, tensors = capture.get()\n\n # Reconstruct `tensors`, but with `tpu_tensors` replaced with\n # `tpu_tensors_on_cpu`.\n new_tensors = []\n for t in tensors:\n if t is None:\n new_tensors.append(None)\n else:\n new_tensors.append(tpu_tensors_on_cpu.pop(0))\n\n # Reconstruct `tensors_dict`.\n new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors)\n # Reconstruct `export_outputs`.\n export_outputs = estimator_spec.export_outputs\n new_export_outputs = collections.OrderedDict(\n (k, _clone_export_output_with_tensors(export_outputs[k], v))\n for k, v in six.iteritems(new_tensors_dict))\n\n return estimator_spec._replace(export_outputs=new_export_outputs)","function_tokens":["def","_call_model_fn_for_inference","(","self",",","features",",","labels",",","mode",",","config",")",":","if","mode","!=","_REWRITE_FOR_INFERENCE_MODE",":","raise","ValueError","(","'mode must be {}; '","'got {}.'",".","format","(","_REWRITE_FOR_INFERENCE_MODE",",","mode",")",")","capture","=","_CapturedObject","(",")","def","computation","(",")",":","\"\"\"Compute tpu tensors used in export_outputs.\n\n Passed to rewrite_for_inference so that model_fn will be called under\n the rewriting contexts. 
Only tpu tensors are returned, but export_outputs\n and scaffold are captured.\n\n Returns:\n A list of Tensors used in export_outputs and not marked for\n outside_compilation.\n \"\"\"","# We should only call model fn once and it should be inside `computation`","# so that building the graph will happen under `rewrite_for_inference`.","mode","=","model_fn_lib",".","ModeKeys",".","PREDICT","estimator_spec","=","self",".","_call_model_fn","(","features",",","labels",",","mode",",","config",")","# We pick the TPU tensors out from `export_output` and later return them","# from `computation` for rewriting.","tensors_dict","=","collections",".","OrderedDict","(","(","k",",","_export_output_to_tensors","(","v",")",")","for","k",",","v","in","six",".","iteritems","(","estimator_spec",".","export_outputs",")",")","tensors","=","nest",".","flatten","(","tensors_dict",")","tpu_tensors","=","[","t","for","t","in","tensors","if","t","is","not","None","]","# We cannot return anything other than `tpu_tensors` here so we capture","# the rest for later use.","capture",".","capture","(","(","estimator_spec",",","tensors_dict",",","tensors",")",")","return","tpu_tensors","tpu_tensors_on_cpu","=","tpu",".","rewrite_for_inference","(","computation",")","estimator_spec",",","tensors_dict",",","tensors","=","capture",".","get","(",")","# Reconstruct `tensors`, but with `tpu_tensors` replaced with","# `tpu_tensors_on_cpu`.","new_tensors","=","[","]","for","t","in","tensors",":","if","t","is","None",":","new_tensors",".","append","(","None",")","else",":","new_tensors",".","append","(","tpu_tensors_on_cpu",".","pop","(","0",")",")","# Reconstruct `tensors_dict`.","new_tensors_dict","=","nest",".","pack_sequence_as","(","tensors_dict",",","new_tensors",")","# Reconstruct `export_outputs`.","export_outputs","=","estimator_spec",".","export_outputs","new_export_outputs","=","collections",".","OrderedDict","(","(","k",",","_clone_export_output_with_tensors","(","export_outputs","[","k","]",",","v",")",")","for","k",",","v","in","six",".","iteritems","(","new_tensors_dict",")",")","return","estimator_spec",".","_replace","(","export_outputs","=","new_export_outputs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2275-L2332"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"TPUEstimator._create_global_step","parameters":"(self, graph)","argument_list":"","return_statement":"return _create_global_step(graph)","docstring":"Creates a global step suitable for TPUs.\n\n Args:\n graph: The graph in which to create the global step.\n\n Returns:\n A global step `Tensor`.\n\n Raises:\n ValueError: if the global step tensor is already defined.","docstring_summary":"Creates a global step suitable for TPUs.","docstring_tokens":["Creates","a","global","step","suitable","for","TPUs","."],"function":"def _create_global_step(self, graph):\n \"\"\"Creates a global step suitable for TPUs.\n\n Args:\n graph: The graph in which to create the global step.\n\n Returns:\n A global step `Tensor`.\n\n Raises:\n ValueError: if the global step tensor is already defined.\n \"\"\"\n return 
_create_global_step(graph)","function_tokens":["def","_create_global_step","(","self",",","graph",")",":","return","_create_global_step","(","graph",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2334-L2346"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"TPUEstimator._call_input_fn","parameters":"(self, input_fn, mode)","argument_list":"","return_statement":"","docstring":"Calls the input function.\n\n Args:\n input_fn: The input function.\n mode: ModeKeys\n\n Returns:\n In TPU mode, returns an input_fn to be called later in model_fn.\n Otherwise, calls the input_fn and returns either fatures or\n (features, labels).\n\n Raises:\n ValueError: if input_fn takes invalid arguments or does not have `params`.","docstring_summary":"Calls the input function.","docstring_tokens":["Calls","the","input","function","."],"function":"def _call_input_fn(self, input_fn, mode):\n \"\"\"Calls the input function.\n\n Args:\n input_fn: The input function.\n mode: ModeKeys\n\n Returns:\n In TPU mode, returns an input_fn to be called later in model_fn.\n Otherwise, calls the input_fn and returns either fatures or\n (features, labels).\n\n Raises:\n ValueError: if input_fn takes invalid arguments or does not have `params`.\n \"\"\"\n input_fn_args = function_utils.fn_args(input_fn)\n config = self.config # a deep copy.\n kwargs = {}\n if 'params' in input_fn_args:\n kwargs['params'] = self.params # a deep copy.\n else:\n raise ValueError('input_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\"batch_size\"]'.format(input_fn))\n if 'config' in input_fn_args:\n kwargs['config'] = config\n\n if 'mode' in input_fn_args:\n kwargs['mode'] = mode\n\n # Records the fact input_fn has been invoked.\n self._is_input_fn_invoked = True\n\n with self._ctx.with_mode(mode) as ctx:\n # Setting the batch size in params first. This helps user to have same\n # input_fn for use_tpu=True\/False.\n batch_size_for_input_fn = ctx.batch_size_for_input_fn\n if batch_size_for_input_fn is not None:\n _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,\n batch_size_for_input_fn)\n\n # For export_savedmodel, input_fn is never passed to Estimator. So,\n # `is_export_mode` must be False.\n if ctx.is_running_on_cpu(is_export_mode=False):\n with ops.device('\/device:CPU:0'):\n return input_fn(**kwargs)\n\n # For TPU computation, input_fn should be invoked in a tf.while_loop for\n # performance. While constructing the tf.while_loop, the structure of\n # inputs returned by the `input_fn` needs to be recorded. The structure\n # includes whether features or labels is dict or single Tensor, dict keys,\n # tensor shapes, and dtypes. The recorded structure is used to create the\n # infeed dequeue ops, which must be wrapped and passed as a Fn, called\n # inside the TPU computation, as the TPU computation is wrapped inside a\n # tf.while_loop also. So, we either pass input_fn to model_fn or pass\n # dequeue_fn to model_fn. 
Here, `input_fn` is passed directly as\n # `features` in `model_fn` signature.\n def _input_fn(ctx):\n _add_item_to_params(kwargs['params'], _CTX_KEY, ctx)\n return input_fn(**kwargs)\n\n return _input_fn","function_tokens":["def","_call_input_fn","(","self",",","input_fn",",","mode",")",":","input_fn_args","=","function_utils",".","fn_args","(","input_fn",")","config","=","self",".","config","# a deep copy.","kwargs","=","{","}","if","'params'","in","input_fn_args",":","kwargs","[","'params'","]","=","self",".","params","# a deep copy.","else",":","raise","ValueError","(","'input_fn ({}) does not include params argument, '","'required by TPUEstimator to pass batch size as '","'params[\"batch_size\"]'",".","format","(","input_fn",")",")","if","'config'","in","input_fn_args",":","kwargs","[","'config'","]","=","config","if","'mode'","in","input_fn_args",":","kwargs","[","'mode'","]","=","mode","# Records the fact input_fn has been invoked.","self",".","_is_input_fn_invoked","=","True","with","self",".","_ctx",".","with_mode","(","mode",")","as","ctx",":","# Setting the batch size in params first. This helps user to have same","# input_fn for use_tpu=True\/False.","batch_size_for_input_fn","=","ctx",".","batch_size_for_input_fn","if","batch_size_for_input_fn","is","not","None",":","_add_item_to_params","(","kwargs","[","'params'","]",",","_BATCH_SIZE_KEY",",","batch_size_for_input_fn",")","# For export_savedmodel, input_fn is never passed to Estimator. So,","# `is_export_mode` must be False.","if","ctx",".","is_running_on_cpu","(","is_export_mode","=","False",")",":","with","ops",".","device","(","'\/device:CPU:0'",")",":","return","input_fn","(","*","*","kwargs",")","# For TPU computation, input_fn should be invoked in a tf.while_loop for","# performance. While constructing the tf.while_loop, the structure of","# inputs returned by the `input_fn` needs to be recorded. The structure","# includes whether features or labels is dict or single Tensor, dict keys,","# tensor shapes, and dtypes. The recorded structure is used to create the","# infeed dequeue ops, which must be wrapped and passed as a Fn, called","# inside the TPU computation, as the TPU computation is wrapped inside a","# tf.while_loop also. So, we either pass input_fn to model_fn or pass","# dequeue_fn to model_fn. Here, `input_fn` is passed directly as","# `features` in `model_fn` signature.","def","_input_fn","(","ctx",")",":","_add_item_to_params","(","kwargs","[","'params'","]",",","_CTX_KEY",",","ctx",")","return","input_fn","(","*","*","kwargs",")","return","_input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2386-L2447"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"TPUEstimator._validate_features_in_predict_input","parameters":"(self, result)","argument_list":"","return_statement":"","docstring":"Skip the validation.\n\n For TPUEstimator, we do not need to check the result type. `_InputPipeline`\n has stronger check. Parent class's check generates confusing warning msg.\n\n Args:\n result: `features` returned by input_fn.","docstring_summary":"Skip the validation.","docstring_tokens":["Skip","the","validation","."],"function":"def _validate_features_in_predict_input(self, result):\n \"\"\"Skip the validation.\n\n For TPUEstimator, we do not need to check the result type. 
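The `_call_input_fn` record above requires input_fn to accept `params` so TPUEstimator can inject the per-shard size as params['batch_size']. A hedged sketch of a conforming input_fn, assuming a TF 1.x environment (the function name and dummy data are illustrative):

import tensorflow as tf

def my_input_fn(params, mode):  # hypothetical input_fn, per the contract above
    # The same input_fn serves use_tpu=True and use_tpu=False, since the
    # injected params['batch_size'] already reflects the per-shard split.
    dataset = tf.data.Dataset.from_tensor_slices({'x': tf.zeros([32, 4])})
    # TPU infeed needs static shapes, hence drop_remainder=True.
    return dataset.repeat().batch(params['batch_size'], drop_remainder=True)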
`_InputPipeline`\n has stronger check. Parent class's check generates confusing warning msg.\n\n Args:\n result: `features` returned by input_fn.\n \"\"\"\n pass","function_tokens":["def","_validate_features_in_predict_input","(","self",",","result",")",":","pass"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2449-L2458"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"TPUEstimator._augment_model_fn","parameters":"(self, model_fn, train_cache_fn, eval_cache_fn, batch_axis)","argument_list":"","return_statement":"return _model_fn","docstring":"Returns a new model_fn, which wraps the TPU support.","docstring_summary":"Returns a new model_fn, which wraps the TPU support.","docstring_tokens":["Returns","a","new","model_fn","which","wraps","the","TPU","support","."],"function":"def _augment_model_fn(self, model_fn, train_cache_fn, eval_cache_fn, batch_axis):\n \"\"\"Returns a new model_fn, which wraps the TPU support.\"\"\"\n\n def _model_fn(features, labels, mode, config, params):\n \"\"\"A Estimator `model_fn` for TPUEstimator.\"\"\"\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, train_cache_fn,\n eval_cache_fn, config, params, ctx)\n\n # `input_fn` is called in `train()`, `evaluate()`, and `predict()`,\n # but not in `export_savedmodel()`.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n # examples_hook is added to training_hooks for both CPU and TPU\n # execution.\n if self._log_every_n_steps is not None:\n examples_hook = ExamplesPerSecondHook(\n ctx.global_batch_size,\n output_dir=self.model_dir,\n every_n_steps=self._log_every_n_steps)\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n estimator_spec = model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n if self._log_every_n_steps is not None:\n estimator_spec = estimator_spec._replace(\n training_hooks=estimator_spec.training_hooks + (examples_hook,))\n return estimator_spec\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n compile_op, loss, host_call, scaffold, training_hooks = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n\n shutdown_hooks = []\n shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',\n 'shutdown_worker')\n if shutdown_mode:\n if shutdown_mode == 'shutdown_worker':\n finalizer_hooks = [\n session_support.ShutdownLameWorkers(timeout_ms=60 * 1000),\n ]\n elif shutdown_mode == 'shutdown_computation':\n finalizer_hooks = [\n 
session_support.RestartComputation(timeout_ms=60 * 1000),\n ]\n else:\n raise ValueError(\n 'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE \"%s\"' % shutdown_mode)\n\n shutdown_hooks.append(\n session_support.GracefulShutdownHook(\n checkpoint_prefix=self.model_dir + '\/model.ckpt',\n on_shutdown_hooks=finalizer_hooks))\n\n with ops.control_dependencies([loss]):\n global_step = array_ops.identity(training.get_global_step())\n hooks = input_hooks + shutdown_hooks\n hooks.extend([\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n tpu_compile_op=compile_op,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator),\n rendezvous=self._rendezvous[mode],\n master=self._config.master,\n session_config=self._session_config,\n ),\n InstallSignalHandlerHook()\n ])\n if self._log_every_n_steps is not None:\n logging_hook_frequency = ( # Divide and round up\n (self._log_every_n_steps +\n self._config.tpu_config.iterations_per_loop - 1) \/\/\n self._config.tpu_config.iterations_per_loop)\n hooks.append(\n training.LoggingTensorHook({\n 'loss': array_ops.identity(loss),\n 'step': global_step,\n },\n every_n_iter=logging_hook_frequency))\n examples_hook._set_steps_per_run( # pylint: disable=protected-access\n self._config.tpu_config.iterations_per_loop)\n hooks.append(examples_hook)\n\n if training_hooks:\n hooks.extend(training_hooks)\n\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n checkpoint_hook = training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n scaffold=scaffold)\n checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access\n self._config.tpu_config.iterations_per_loop)\n chief_hooks.append(checkpoint_hook)\n\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops(ctx)\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n compile_op, total_loss, host_calls, scaffold, eval_hooks = (\n _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(\n total_loss,\n math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))\n\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops(ctx)\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n\n eval_metrics = host_call_ret.get('eval_metrics', {})\n if eval_metrics:\n # Creates a dummy metric update_op for all metrics. Estimator\n # expects all metrics in `eval_metric_ops` have update_op and calls\n # them one by one. The real metric update_ops are invoked in a\n # separated thread. 
So, here give Estimator the dummy op for all\n # metrics.\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n for k, v in eval_metrics.items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n else:\n # If no eval metrics are passed, create an identity node for the\n # loss and add `internal_ops_to_run` to its dependencies. So\n # `internal_ops_to_run` can be executed.\n with ops.control_dependencies(internal_ops_to_run):\n mean_loss = array_ops.identity(mean_loss)\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n tpu_compile_op=compile_op,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator),\n rendezvous=self._rendezvous[mode],\n master=self._config.evaluation_master,\n session_config=self._session_config,\n )] + input_hooks\n\n if eval_hooks:\n hooks.extend(eval_hooks)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n (compile_op, dummy_predict_op, host_calls,\n scaffold, prediction_hooks) = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops(ctx)\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
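The eval branch above works around an Estimator requirement: every entry in `eval_metric_ops` must carry an update_op, but the real TPU metric updates run in a separate outfeed thread, so each metric is paired with one shared dummy op. A pure-Python sketch of that split (ops are stand-in strings here):

def split_eval_metrics(eval_metrics, dummy_update_op):
    # Each metric keeps its value op but shares the dummy update op; the real
    # update ops are collected into eval_update_ops for the outfeed thread.
    eval_metric_ops, eval_update_ops = {}, []
    for name, (value_op, update_op) in eval_metrics.items():
        eval_metric_ops[name] = (value_op, dummy_update_op)
        eval_update_ops.append(update_op)
    return eval_metric_ops, eval_update_ops

metric_ops, update_ops = split_eval_metrics(
    {'accuracy': ('acc_value', 'acc_update')}, dummy_update_op='no_op')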
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions,\n message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(\n ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],\n tpu_compile_op=compile_op,\n master=self._config.master,\n session_config=self._session_config),\n ] + input_hooks\n\n if prediction_hooks:\n hooks.extend(prediction_hooks)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)\n\n return _model_fn","function_tokens":["def","_augment_model_fn","(","self",",","model_fn",",","train_cache_fn",",","eval_cache_fn",",","batch_axis",")",":","def","_model_fn","(","features",",","labels",",","mode",",","config",",","params",")",":","\"\"\"A Estimator `model_fn` for TPUEstimator.\"\"\"","with","self",".","_ctx",".","with_mode","(","mode",")","as","ctx",":","model_fn_wrapper","=","_ModelFnWrapper","(","model_fn",",","train_cache_fn",",","eval_cache_fn",",","config",",","params",",","ctx",")","# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,","# but not in `export_savedmodel()`.","if","self",".","_is_input_fn_invoked",":","is_export_mode","=","False","else",":","is_export_mode","=","True","# Clear the bit.","self",".","_is_input_fn_invoked","=","None","# examples_hook is added to training_hooks for both CPU and TPU","# execution.","if","self",".","_log_every_n_steps","is","not","None",":","examples_hook","=","ExamplesPerSecondHook","(","ctx",".","global_batch_size",",","output_dir","=","self",".","model_dir",",","every_n_steps","=","self",".","_log_every_n_steps",")","if","ctx",".","is_running_on_cpu","(","is_export_mode","=","is_export_mode",")",":","logging",".","info","(","'Running %s on CPU'",",","mode",")","estimator_spec","=","model_fn_wrapper",".","call_without_tpu","(","features",",","labels",",","is_export_mode","=","is_export_mode",")","if","self",".","_log_every_n_steps","is","not","None",":","estimator_spec","=","estimator_spec",".","_replace","(","training_hooks","=","estimator_spec",".","training_hooks","+","(","examples_hook",",",")",")","return","estimator_spec","assert","labels","is","None",",","'`labels` passed to `model_fn` must be `None`.'","# TPUEstimator._call_input_fn passes `input_fn` as features to here.","assert","callable","(","features",")",",","'`input_fn` is not 
callable.'","input_fn","=","features","input_holders","=","_InputPipeline","(","input_fn",",","batch_axis",",","ctx",")","enqueue_ops",",","dequeue_fn",",","input_hooks",",","run_infeed_loop_on_coordinator","=","(","input_holders",".","generate_infeed_enqueue_ops_and_dequeue_fn","(",")",")","graph","=","ops",".","get_default_graph","(",")","for","enqueue_op","in","enqueue_ops",":","if","isinstance","(","enqueue_op",",","list",")",":","graph",".","get_collection_ref","(","_TPU_ENQUEUE_OPS",")",".","extend","(","enqueue_op",")","else",":","graph",".","add_to_collection","(","_TPU_ENQUEUE_OPS",",","enqueue_op",")","if","mode","==","model_fn_lib",".","ModeKeys",".","TRAIN",":","compile_op",",","loss",",","host_call",",","scaffold",",","training_hooks","=","(","_train_on_tpu_system","(","ctx",",","model_fn_wrapper",",","dequeue_fn",")",")","host_ops","=","host_call",".","create_tpu_hostcall","(",")","if","host_ops","is","None",":","host_ops","=","[","]","shutdown_hooks","=","[","]","shutdown_mode","=","os",".","environ",".","get","(","'TF_TPU_GRACEFUL_SHUTDOWN_MODE'",",","'shutdown_worker'",")","if","shutdown_mode",":","if","shutdown_mode","==","'shutdown_worker'",":","finalizer_hooks","=","[","session_support",".","ShutdownLameWorkers","(","timeout_ms","=","60","*","1000",")",",","]","elif","shutdown_mode","==","'shutdown_computation'",":","finalizer_hooks","=","[","session_support",".","RestartComputation","(","timeout_ms","=","60","*","1000",")",",","]","else",":","raise","ValueError","(","'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE \"%s\"'","%","shutdown_mode",")","shutdown_hooks",".","append","(","session_support",".","GracefulShutdownHook","(","checkpoint_prefix","=","self",".","model_dir","+","'\/model.ckpt'",",","on_shutdown_hooks","=","finalizer_hooks",")",")","with","ops",".","control_dependencies","(","[","loss","]",")",":","global_step","=","array_ops",".","identity","(","training",".","get_global_step","(",")",")","hooks","=","input_hooks","+","shutdown_hooks","hooks",".","extend","(","[","TPUInfeedOutfeedSessionHook","(","ctx",",","enqueue_ops",",","host_ops",",","tpu_compile_op","=","compile_op",",","run_infeed_loop_on_coordinator","=","(","run_infeed_loop_on_coordinator",")",",","rendezvous","=","self",".","_rendezvous","[","mode","]",",","master","=","self",".","_config",".","master",",","session_config","=","self",".","_session_config",",",")",",","InstallSignalHandlerHook","(",")","]",")","if","self",".","_log_every_n_steps","is","not","None",":","logging_hook_frequency","=","(","# Divide and round up","(","self",".","_log_every_n_steps","+","self",".","_config",".","tpu_config",".","iterations_per_loop","-","1",")","\/\/","self",".","_config",".","tpu_config",".","iterations_per_loop",")","hooks",".","append","(","training",".","LoggingTensorHook","(","{","'loss'",":","array_ops",".","identity","(","loss",")",",","'step'",":","global_step",",","}",",","every_n_iter","=","logging_hook_frequency",")",")","examples_hook",".","_set_steps_per_run","(","# pylint: 
disable=protected-access","self",".","_config",".","tpu_config",".","iterations_per_loop",")","hooks",".","append","(","examples_hook",")","if","training_hooks",":","hooks",".","extend","(","training_hooks",")","chief_hooks","=","[","]","if","(","self",".","_config",".","save_checkpoints_secs","or","self",".","_config",".","save_checkpoints_steps",")",":","checkpoint_hook","=","training",".","CheckpointSaverHook","(","self",".","model_dir",",","save_secs","=","self",".","_config",".","save_checkpoints_secs",",","save_steps","=","self",".","_config",".","save_checkpoints_steps",",","scaffold","=","scaffold",")","checkpoint_hook",".","_set_steps_per_run","(","# pylint: disable=protected-access","self",".","_config",".","tpu_config",".","iterations_per_loop",")","chief_hooks",".","append","(","checkpoint_hook",")","summary",".","scalar","(","model_fn_lib",".","LOSS_METRIC_KEY",",","loss",")","with","ops",".","control_dependencies","(","[","loss","]",")",":","update_ops","=","_sync_variables_ops","(","ctx",")","# Validate the TPU training graph to catch basic errors","_validate_tpu_training_graph","(",")","train_op","=","control_flow_ops",".","group","(","*","update_ops",")","graph",".","add_to_collection","(","_TPU_TRAIN_OP",",","train_op",")","return","model_fn_lib",".","EstimatorSpec","(","mode",",","loss","=","loss",",","training_chief_hooks","=","chief_hooks",",","training_hooks","=","hooks",",","train_op","=","train_op",",","scaffold","=","scaffold",")","if","mode","==","model_fn_lib",".","ModeKeys",".","EVAL",":","compile_op",",","total_loss",",","host_calls",",","scaffold",",","eval_hooks","=","(","_eval_on_tpu_system","(","ctx",",","model_fn_wrapper",",","dequeue_fn",")",")","iterations_per_loop_var","=","_create_or_get_iterations_per_loop","(",")","mean_loss","=","math_ops",".","div","(","total_loss",",","math_ops",".","cast","(","iterations_per_loop_var",",","dtype","=","total_loss",".","dtype",")",")","with","ops",".","control_dependencies","(","[","mean_loss","]",")",":","# After TPU evaluation computation is done (the mean_loss tensor),","# reads all variables back from TPU and updates the eval step","# counter properly","internal_ops_to_run","=","_sync_variables_ops","(","ctx",")","internal_ops_to_run",".","append","(","_increase_eval_step_op","(","iterations_per_loop_var",")",")","host_call_ret","=","host_calls",".","create_tpu_hostcall","(",")","eval_metric_ops","=","{","}","eval_update_ops","=","[","]","eval_metrics","=","host_call_ret",".","get","(","'eval_metrics'",",","{","}",")","if","eval_metrics",":","# Creates a dummy metric update_op for all metrics. Estimator","# expects all metrics in `eval_metric_ops` have update_op and calls","# them one by one. The real metric update_ops are invoked in a","# separated thread. So, here give Estimator the dummy op for all","# metrics.","with","ops",".","control_dependencies","(","internal_ops_to_run",")",":","dummy_update_op","=","control_flow_ops",".","no_op","(",")","for","k",",","v","in","eval_metrics",".","items","(",")",":","eval_metric_ops","[","k","]","=","(","v","[","0","]",",","dummy_update_op",")","eval_update_ops",".","append","(","v","[","1","]",")","else",":","# If no eval metrics are passed, create an identity node for the","# loss and add `internal_ops_to_run` to its dependencies. 
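The EVAL branch above hands Estimator a dummy `update_op` for every metric because the real metric updates run on a side thread alongside the infeed/outfeed. A small pure-Python sketch of that split (the names are illustrative, not the TensorFlow ops):

def split_eval_metrics(eval_metrics, dummy_update_op="no_op"):
    # Estimator expects (value_op, update_op) pairs; the real update ops
    # are collected separately and executed by the outfeed thread.
    eval_metric_ops, eval_update_ops = {}, []
    for name, (value_op, update_op) in eval_metrics.items():
        eval_metric_ops[name] = (value_op, dummy_update_op)
        eval_update_ops.append(update_op)
    return eval_metric_ops, eval_update_ops

metric_ops, update_ops = split_eval_metrics({"accuracy": ("acc_value", "acc_update")})
assert metric_ops["accuracy"] == ("acc_value", "no_op")
assert update_ops == ["acc_update"]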
So","# `internal_ops_to_run` can be executed.","with","ops",".","control_dependencies","(","internal_ops_to_run",")",":","mean_loss","=","array_ops",".","identity","(","mean_loss",")","if","'host_call'","not","in","host_call_ret",":","host_ops","=","[","]","else",":","host_ops","=","host_call_ret","[","'host_call'","]","hooks","=","[","TPUInfeedOutfeedSessionHook","(","ctx",",","enqueue_ops",",","eval_update_ops","+","host_ops",",","tpu_compile_op","=","compile_op",",","run_infeed_loop_on_coordinator","=","(","run_infeed_loop_on_coordinator",")",",","rendezvous","=","self",".","_rendezvous","[","mode","]",",","master","=","self",".","_config",".","evaluation_master",",","session_config","=","self",".","_session_config",",",")","]","+","input_hooks","if","eval_hooks",":","hooks",".","extend","(","eval_hooks",")","return","model_fn_lib",".","EstimatorSpec","(","mode",",","loss","=","mean_loss",",","evaluation_hooks","=","hooks",",","eval_metric_ops","=","eval_metric_ops",",","scaffold","=","scaffold",")","# Predict","assert","mode","==","model_fn_lib",".","ModeKeys",".","PREDICT","(","compile_op",",","dummy_predict_op",",","host_calls",",","scaffold",",","prediction_hooks",")","=","_predict_on_tpu_system","(","ctx",",","model_fn_wrapper",",","dequeue_fn",")","with","ops",".","control_dependencies","(","[","dummy_predict_op","]",")",":","internal_ops_to_run","=","_sync_variables_ops","(","ctx",")","with","ops",".","control_dependencies","(","internal_ops_to_run",")",":","dummy_predict_op","=","control_flow_ops",".","no_op","(",")","# In train and evaluation, the main TPU program is passed to monitored","# training session to run. Infeed enqueue and outfeed dequeue are","# executed in side threads. This is not the configuration for","# prediction mode.","#","# For prediction, the Estimator executes the EstimatorSpec.predictions","# directly and yield the element (via generator) to call site. So, the","# outfeed based prediction must be passed to MonitoredSession directly.","# Other parts of the TPU execution are organized as follows.","#","# 1. All outfeed based Tensors must be grouped with predictions Tensors","# to form a single invocation. This avoid the issue we might trigger","# multiple outfeeds incorrectly. To achieve this, `host_call` is","# placed in control_dependencies of `stopping_signals`, and","# `stopping_signals` is passed into _StoppingPredictHook, which sets","# the `stopping_signals` as SessionRunArgs. MonitoredSession merges","# all SessionRunArgs with the fetch in session.run together.","#","# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)","# are grouped together. 
They will be launched once and only once in","# side threads and they quit naturally according to the SAME stopping","# condition.","enqueue_ops",".","append","(","dummy_predict_op",")","host_call_ret","=","host_calls",".","create_tpu_hostcall","(",")","if","'host_call'","not","in","host_call_ret",":","host_ops","=","[","]","else",":","host_ops","=","host_call_ret","[","'host_call'","]","predictions","=","host_call_ret","[","'predictions'","]","_verify_cross_hosts_transfer_size","(","predictions",",","message","=","(","'The estimated size for TPUEstimatorSpec.predictions is too '","'large.'",")",")","signals","=","host_call_ret","[","'signals'","]","with","ops",".","control_dependencies","(","host_ops",")",":","host_ops","=","[","]","# Empty, we do do not need it anymore.","scalar_stopping_signal","=","_StopSignals",".","as_scalar_stopping_signal","(","signals",")","predictions","=","_PaddingSignals",".","slice_tensor_or_dict","(","predictions",",","signals",")","hooks","=","[","_StoppingPredictHook","(","scalar_stopping_signal",")",",","TPUInfeedOutfeedSessionHookForPrediction","(","ctx",",","enqueue_ops",",","host_ops",",","rendezvous","=","self",".","_rendezvous","[","mode","]",",","tpu_compile_op","=","compile_op",",","master","=","self",".","_config",".","master",",","session_config","=","self",".","_session_config",")",",","]","+","input_hooks","if","prediction_hooks",":","hooks",".","extend","(","prediction_hooks",")","return","model_fn_lib",".","EstimatorSpec","(","mode",",","prediction_hooks","=","hooks",",","predictions","=","predictions",",","scaffold","=","scaffold",")","return","_model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L2527-L2813"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_Inputs.from_input_fn","parameters":"(return_values)","argument_list":"","return_statement":"return _Inputs(features, labels)","docstring":"Returns an `_Inputs` instance according to `input_fn` return value.","docstring_summary":"Returns an `_Inputs` instance according to `input_fn` return value.","docstring_tokens":["Returns","an","_Inputs","instance","according","to","input_fn","return","value","."],"function":"def from_input_fn(return_values):\n \"\"\"Returns an `_Inputs` instance according to `input_fn` return value.\"\"\"\n if isinstance(return_values, dataset_ops.DatasetV2):\n dataset = return_values\n return _Inputs(dataset=dataset)\n\n features, labels = _Inputs._parse_inputs(return_values)\n return _Inputs(features, labels)","function_tokens":["def","from_input_fn","(","return_values",")",":","if","isinstance","(","return_values",",","dataset_ops",".","DatasetV2",")",":","dataset","=","return_values","return","_Inputs","(","dataset","=","dataset",")","features",",","labels","=","_Inputs",".","_parse_inputs","(","return_values",")","return","_Inputs","(","features",",","labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3129-L3136"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_Inputs.is_dataset","parameters":"(self)","argument_list":"","return_statement":"return self._dataset is not None","docstring":"Returns True if the return value from input_fn is 
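`_Inputs.from_input_fn` in the record above is a two-way dispatch: a `Dataset` return value is wrapped as-is, anything else is unpacked into `(features, labels)`. A runnable sketch with hypothetical stand-ins for `dataset_ops.DatasetV2` and `_Inputs`:

class Dataset:          # stand-in for dataset_ops.DatasetV2
    pass

class Inputs:           # stand-in for _Inputs
    def __init__(self, features=None, labels=None, dataset=None):
        self.features, self.labels, self.dataset = features, labels, dataset

    @staticmethod
    def _parse_inputs(return_values):
        # input_fn may return (features, labels) or features alone.
        if isinstance(return_values, tuple):
            return return_values
        return return_values, None

    @staticmethod
    def from_input_fn(return_values):
        if isinstance(return_values, Dataset):
            return Inputs(dataset=return_values)
        features, labels = Inputs._parse_inputs(return_values)
        return Inputs(features, labels)

assert Inputs.from_input_fn(Dataset()).dataset is not None
assert Inputs.from_input_fn(({"x": 1}, 0)).labels == 0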
Dataset.","docstring_summary":"Returns True if the return value from input_fn is Dataset.","docstring_tokens":["Returns","True","if","the","return","value","from","input_fn","is","Dataset","."],"function":"def is_dataset(self):\n \"\"\"Returns True if the return value from input_fn is Dataset.\"\"\"\n return self._dataset is not None","function_tokens":["def","is_dataset","(","self",")",":","return","self",".","_dataset","is","not","None"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3147-L3149"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_Inputs.dataset_initializer","parameters":"(self)","argument_list":"","return_statement":"return self._iterator.initializer","docstring":"Returns the dataset's initializer.\n\n The initializer must be run before calling `features_and_labels`.","docstring_summary":"Returns the dataset's initializer.","docstring_tokens":["Returns","the","dataset","s","initializer","."],"function":"def dataset_initializer(self):\n \"\"\"Returns the dataset's initializer.\n\n The initializer must be run before calling `features_and_labels`.\n \"\"\"\n self._iterator = dataset_ops.make_initializable_iterator(self._dataset)\n return self._iterator.initializer","function_tokens":["def","dataset_initializer","(","self",")",":","self",".","_iterator","=","dataset_ops",".","make_initializable_iterator","(","self",".","_dataset",")","return","self",".","_iterator",".","initializer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3151-L3157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_Inputs.features_and_labels","parameters":"(self)","argument_list":"","return_statement":"return (self._features, self._labels)","docstring":"Gets `features` and `labels`.","docstring_summary":"Gets `features` and `labels`.","docstring_tokens":["Gets","features","and","labels","."],"function":"def features_and_labels(self):\n \"\"\"Gets `features` and `labels`.\"\"\"\n if self.is_dataset:\n if self._iterator is None:\n raise RuntimeError('Internal error: Must run dataset_initializer '\n 'before calling features_and_labels(). Please file '\n 'a bug!')\n return _Inputs._parse_inputs(self._iterator.get_next())\n\n return (self._features, self._labels)","function_tokens":["def","features_and_labels","(","self",")",":","if","self",".","is_dataset",":","if","self",".","_iterator","is","None",":","raise","RuntimeError","(","'Internal error: Must run dataset_initializer '","'before calling features_and_labels(). 
Please file '","'a bug!'",")","return","_Inputs",".","_parse_inputs","(","self",".","_iterator",".","get_next","(",")",")","return","(","self",".","_features",",","self",".","_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3159-L3168"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_InputsWithStoppingSignals.signals","parameters":"(self)","argument_list":"","return_statement":"return signals","docstring":"Returns the `Signals` from `_Inputs`.","docstring_summary":"Returns the `Signals` from `_Inputs`.","docstring_tokens":["Returns","the","Signals","from","_Inputs","."],"function":"def signals(self):\n \"\"\"Returns the `Signals` from `_Inputs`.\"\"\"\n if self._current_inputs is None:\n raise RuntimeError(\n 'Internal Error: The current inputs have not been properly '\n 'generated. First call features_and_labels, then call signals.')\n signals = self._current_inputs['signals']\n self._current_inputs = None\n return signals","function_tokens":["def","signals","(","self",")",":","if","self",".","_current_inputs","is","None",":","raise","RuntimeError","(","'Internal Error: The current inputs have not been properly '","'generated. First call features_and_labels, then call signals.'",")","signals","=","self",".","_current_inputs","[","'signals'","]","self",".","_current_inputs","=","None","return","signals"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3241-L3249"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_InputsWithStoppingSignals.insert_stopping_signal","parameters":"(stop, batch_size, add_padding=False)","argument_list":"","return_statement":"return _map_fn","docstring":"Inserts stopping_signal into dataset via _map_fn.\n\n Here we change the data structure in the dataset, such that the return value\n is a dictionary now and `features`, `labels`, and `signals` are three\n distinguished keys in that dict. This provides a better structure, which\n eases the process to decompose the inputs (see `features_and_labels`).\n\n Args:\n stop: bool, state of current stopping signals.\n batch_size: int, batch size.\n add_padding: bool, whether to pad the tensor to full batch size.\n\n Returns:\n A map_fn passed to dataset.map API.","docstring_summary":"Inserts stopping_signal into dataset via _map_fn.","docstring_tokens":["Inserts","stopping_signal","into","dataset","via","_map_fn","."],"function":"def insert_stopping_signal(stop, batch_size, add_padding=False):\n \"\"\"Inserts stopping_signal into dataset via _map_fn.\n\n Here we change the data structure in the dataset, such that the return value\n is a dictionary now and `features`, `labels`, and `signals` are three\n distinguished keys in that dict. 
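`_InputsWithStoppingSignals.signals` enforces a strict call order: `features_and_labels` caches the current structured batch, and `signals` consumes it exactly once. A simplified sketch of that consume-once protocol (class and field names are illustrative):

class InputsWithSignals:
    def __init__(self):
        self._current_inputs = None

    def features_and_labels(self, batch):
        # Cache the structured batch so signals() can consume it next.
        self._current_inputs = batch
        return batch["features"]

    def signals(self):
        if self._current_inputs is None:
            raise RuntimeError("Call features_and_labels() before signals().")
        signals = self._current_inputs["signals"]
        self._current_inputs = None   # one read per batch
        return signals

inputs = InputsWithSignals()
inputs.features_and_labels({"features": [1, 2], "signals": {"stopping": False}})
assert inputs.signals() == {"stopping": False}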
This provides a better structure, which\n eases the process to decompose the inputs (see `features_and_labels`).\n\n Args:\n stop: bool, state of current stopping signals.\n batch_size: int, batch size.\n add_padding: bool, whether to pad the tensor to full batch size.\n\n Returns:\n A map_fn passed to dataset.map API.\n \"\"\"\n\n def _map_fn(*args):\n \"\"\"The map fn to insert signals.\"\"\"\n if len(args) == 1:\n # Unpack the single Tensor\/dict argument as features. This is required\n # for the input_fn returns no labels.\n args = args[0]\n features, labels = _Inputs._parse_inputs(args)\n new_input_dict = {}\n\n if add_padding:\n padding_mask, features, labels = (\n _PaddingSignals.pad_features_and_labels(features, labels,\n batch_size))\n\n new_input_dict['features'] = features\n if labels is not None:\n new_input_dict['labels'] = labels\n\n else:\n new_input_dict['features'] = features\n if labels is not None:\n new_input_dict['labels'] = labels\n padding_mask = None\n\n new_input_dict['signals'] = _StopSignals(\n stop=stop, batch_size=batch_size,\n padding_mask=padding_mask).as_dict()\n\n return new_input_dict\n\n return _map_fn","function_tokens":["def","insert_stopping_signal","(","stop",",","batch_size",",","add_padding","=","False",")",":","def","_map_fn","(","*","args",")",":","\"\"\"The map fn to insert signals.\"\"\"","if","len","(","args",")","==","1",":","# Unpack the single Tensor\/dict argument as features. This is required","# for the input_fn returns no labels.","args","=","args","[","0","]","features",",","labels","=","_Inputs",".","_parse_inputs","(","args",")","new_input_dict","=","{","}","if","add_padding",":","padding_mask",",","features",",","labels","=","(","_PaddingSignals",".","pad_features_and_labels","(","features",",","labels",",","batch_size",")",")","new_input_dict","[","'features'","]","=","features","if","labels","is","not","None",":","new_input_dict","[","'labels'","]","=","labels","else",":","new_input_dict","[","'features'","]","=","features","if","labels","is","not","None",":","new_input_dict","[","'labels'","]","=","labels","padding_mask","=","None","new_input_dict","[","'signals'","]","=","_StopSignals","(","stop","=","stop",",","batch_size","=","batch_size",",","padding_mask","=","padding_mask",")",".","as_dict","(",")","return","new_input_dict","return","_map_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3252-L3299"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_StopSignals.as_dict","parameters":"(self)","argument_list":"","return_statement":"return signals","docstring":"Returns the signals as Python dict.","docstring_summary":"Returns the signals as Python dict.","docstring_tokens":["Returns","the","signals","as","Python","dict","."],"function":"def as_dict(self):\n \"\"\"Returns the signals as Python dict.\"\"\"\n shape = [self._batch_size, 1]\n dtype = dtypes.bool\n\n if self._stop:\n stopping = array_ops.ones(shape=shape, dtype=dtype)\n else:\n stopping = array_ops.zeros(shape=shape, dtype=dtype)\n\n signals = {'stopping': stopping}\n if self._padding_mask is not None:\n signals['padding_mask'] = self._padding_mask\n return 
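The `_map_fn` returned by `insert_stopping_signal` restructures every dataset element into a dict with `features`, optional `labels`, and `signals` keys. A pure-Python sketch of that restructuring, with the padding branch omitted (see the `_PaddingSignals` records below):

def make_map_fn(stop, batch_size):
    def _map_fn(*args):
        if len(args) == 1:
            args = args[0]            # input_fn returned features only
        if isinstance(args, tuple):
            features, labels = args
        else:
            features, labels = args, None
        element = {"features": features,
                   "signals": {"stop": stop, "batch_size": batch_size}}
        if labels is not None:
            element["labels"] = labels
        return element
    return _map_fn

map_fn = make_map_fn(stop=False, batch_size=8)
assert map_fn({"x": [1]})["signals"]["stop"] is False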
signals","function_tokens":["def","as_dict","(","self",")",":","shape","=","[","self",".","_batch_size",",","1","]","dtype","=","dtypes",".","bool","if","self",".","_stop",":","stopping","=","array_ops",".","ones","(","shape","=","shape",",","dtype","=","dtype",")","else",":","stopping","=","array_ops",".","zeros","(","shape","=","shape",",","dtype","=","dtype",")","signals","=","{","'stopping'",":","stopping","}","if","self",".","_padding_mask","is","not","None",":","signals","[","'padding_mask'","]","=","self",".","_padding_mask","return","signals"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3313-L3326"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_StopSignals.should_stop","parameters":"(scalar_stopping_signal)","argument_list":"","return_statement":"","docstring":"Detects whether scalar_stopping_signal indicates stopping.","docstring_summary":"Detects whether scalar_stopping_signal indicates stopping.","docstring_tokens":["Detects","whether","scalar_stopping_signal","indicates","stopping","."],"function":"def should_stop(scalar_stopping_signal):\n \"\"\"Detects whether scalar_stopping_signal indicates stopping.\"\"\"\n if isinstance(scalar_stopping_signal, ops.Tensor):\n # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF\n # way to express the bool check whether scalar_stopping_signal is True.\n return math_ops.logical_and(scalar_stopping_signal,\n _StopSignals.STOPPING_SIGNAL)\n else:\n # For non Tensor case, it is used in SessionRunHook. So, we cannot modify\n # the graph anymore. Here, we use pure Python.\n return bool(scalar_stopping_signal)","function_tokens":["def","should_stop","(","scalar_stopping_signal",")",":","if","isinstance","(","scalar_stopping_signal",",","ops",".","Tensor",")",":","# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF","# way to express the bool check whether scalar_stopping_signal is True.","return","math_ops",".","logical_and","(","scalar_stopping_signal",",","_StopSignals",".","STOPPING_SIGNAL",")","else",":","# For non Tensor case, it is used in SessionRunHook. So, we cannot modify","# the graph anymore. 
Here, we use pure Python.","return","bool","(","scalar_stopping_signal",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3333-L3343"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_PaddingSignals.pad_features_and_labels","parameters":"(features, labels, batch_size)","argument_list":"","return_statement":"return padding_mask, features, labels","docstring":"Pads out the batch dimension of features and labels.","docstring_summary":"Pads out the batch dimension of features and labels.","docstring_tokens":["Pads","out","the","batch","dimension","of","features","and","labels","."],"function":"def pad_features_and_labels(features, labels, batch_size):\n \"\"\"Pads out the batch dimension of features and labels.\"\"\"\n real_batch_size = array_ops.shape(\n _PaddingSignals._find_any_tensor(features))[0]\n\n batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)\n\n check_greater = check_ops.assert_greater_equal(\n batch_size_tensor,\n real_batch_size,\n data=(batch_size_tensor, real_batch_size),\n message='The real batch size should not be greater than batch_size.')\n\n with ops.control_dependencies([check_greater]):\n missing_count = batch_size_tensor - real_batch_size\n\n def pad_single_tensor(tensor):\n \"\"\"Pads out the batch dimension of a tensor to the complete batch_size.\"\"\"\n rank = len(tensor.shape)\n assert rank > 0\n padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))\n padded_shape = (batch_size,) + tuple(tensor.shape[1:])\n padded_tensor = array_ops.pad(tensor, padding)\n padded_tensor.set_shape(padded_shape)\n return padded_tensor\n\n def nest_pad(tensor_or_dict):\n return nest.map_structure(pad_single_tensor, tensor_or_dict)\n\n features = nest_pad(features)\n if labels is not None:\n labels = nest_pad(labels)\n\n padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,\n batch_size)\n\n return padding_mask, features, labels","function_tokens":["def","pad_features_and_labels","(","features",",","labels",",","batch_size",")",":","real_batch_size","=","array_ops",".","shape","(","_PaddingSignals",".","_find_any_tensor","(","features",")",")","[","0","]","batch_size_tensor","=","constant_op",".","constant","(","batch_size",",","dtypes",".","int32",")","check_greater","=","check_ops",".","assert_greater_equal","(","batch_size_tensor",",","real_batch_size",",","data","=","(","batch_size_tensor",",","real_batch_size",")",",","message","=","'The real batch size should not be greater than batch_size.'",")","with","ops",".","control_dependencies","(","[","check_greater","]",")",":","missing_count","=","batch_size_tensor","-","real_batch_size","def","pad_single_tensor","(","tensor",")",":","\"\"\"Pads out the batch dimension of a tensor to the complete 
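`_StopSignals.should_stop` must behave both in-graph (where the signal is a Tensor and the check must stay symbolic) and inside a `SessionRunHook` (where the graph is frozen and plain Python suffices). A sketch of that dual path, with `np.ndarray` standing in for `ops.Tensor`:

import numpy as np

STOPPING_SIGNAL = True

def should_stop(scalar_stopping_signal):
    if isinstance(scalar_stopping_signal, np.ndarray):
        # In-graph case: keep the check as an array op.
        return np.logical_and(scalar_stopping_signal, STOPPING_SIGNAL)
    # Hook case: the graph cannot be modified, so use plain Python.
    return bool(scalar_stopping_signal)

assert should_stop(1) is True
assert bool(should_stop(np.array(True)))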
batch_size.\"\"\"","rank","=","len","(","tensor",".","shape",")","assert","rank",">","0","padding","=","array_ops",".","stack","(","[","[","0",",","missing_count","]","]","+","[","[","0",",","0","]","]","*","(","rank","-","1",")",")","padded_shape","=","(","batch_size",",",")","+","tuple","(","tensor",".","shape","[","1",":","]",")","padded_tensor","=","array_ops",".","pad","(","tensor",",","padding",")","padded_tensor",".","set_shape","(","padded_shape",")","return","padded_tensor","def","nest_pad","(","tensor_or_dict",")",":","return","nest",".","map_structure","(","pad_single_tensor",",","tensor_or_dict",")","features","=","nest_pad","(","features",")","if","labels","is","not","None",":","labels","=","nest_pad","(","labels",")","padding_mask","=","_PaddingSignals",".","_padding_mask","(","real_batch_size",",","missing_count",",","batch_size",")","return","padding_mask",",","features",",","labels"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3350-L3386"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/tpu_estimator.py","language":"python","identifier":"_PaddingSignals.slice_tensor_or_dict","parameters":"(tensor_or_dict, signals)","argument_list":"","return_statement":"return nest.map_structure(slice_fn, tensor_or_dict)","docstring":"Slice the real Tensors according to padding mask in signals.","docstring_summary":"Slice the real Tensors according to padding mask in signals.","docstring_tokens":["Slice","the","real","Tensors","according","to","padding","mask","in","signals","."],"function":"def slice_tensor_or_dict(tensor_or_dict, signals):\n \"\"\"Slice the real Tensors according to padding mask in signals.\"\"\"\n\n padding_mask = signals['padding_mask']\n batch_size = array_ops.shape(padding_mask)[0]\n\n def verify_batch_size(tensor):\n check_batch_size = math_ops.equal(batch_size, tensor.shape[0])\n with ops.control_dependencies([check_batch_size]):\n return array_ops.identity(tensor)\n\n def slice_single_tensor(tensor):\n rank = len(tensor.shape)\n assert rank > 0\n real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)\n return verify_batch_size(tensor)[0:real_batch_size]\n\n # As we split the Tensors to all TPU cores and concat them back, it is\n # important to ensure the real data is placed before padded ones, i.e.,\n # order is preserved. 
By that, the sliced padding mask should have all 0's.\n # If this assertion failed, # the slice logic here would not hold.\n sliced_padding_mask = slice_single_tensor(padding_mask)\n assert_padding_mask = math_ops.equal(\n math_ops.reduce_sum(sliced_padding_mask), 0)\n\n with ops.control_dependencies([assert_padding_mask]):\n should_stop = _StopSignals.should_stop(\n _StopSignals.as_scalar_stopping_signal(signals))\n\n is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)\n\n def slice_fn(tensor):\n # If the current batch is full batch or part of stopping signals, we do\n # not need to slice to save performance.\n return control_flow_ops.cond(\n math_ops.logical_or(should_stop, is_full_batch),\n (lambda: verify_batch_size(tensor)),\n (lambda: slice_single_tensor(tensor)))\n\n return nest.map_structure(slice_fn, tensor_or_dict)","function_tokens":["def","slice_tensor_or_dict","(","tensor_or_dict",",","signals",")",":","padding_mask","=","signals","[","'padding_mask'","]","batch_size","=","array_ops",".","shape","(","padding_mask",")","[","0","]","def","verify_batch_size","(","tensor",")",":","check_batch_size","=","math_ops",".","equal","(","batch_size",",","tensor",".","shape","[","0","]",")","with","ops",".","control_dependencies","(","[","check_batch_size","]",")",":","return","array_ops",".","identity","(","tensor",")","def","slice_single_tensor","(","tensor",")",":","rank","=","len","(","tensor",".","shape",")","assert","rank",">","0","real_batch_size","=","batch_size","-","math_ops",".","reduce_sum","(","padding_mask",")","return","verify_batch_size","(","tensor",")","[","0",":","real_batch_size","]","# As we split the Tensors to all TPU cores and concat them back, it is","# important to ensure the real data is placed before padded ones, i.e.,","# order is preserved. 
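The padding protocol in `pad_features_and_labels` / `slice_tensor_or_dict` relies on order: real rows come first, padded rows carry mask value 1, so slicing `[0:real_batch_size]` recovers exactly the real data. A NumPy sketch of both halves:

import numpy as np

def pad_batch(features, batch_size):
    real = features.shape[0]
    missing = batch_size - real
    padded = np.pad(features, [(0, missing)] + [(0, 0)] * (features.ndim - 1))
    padding_mask = np.concatenate(
        [np.zeros(real, np.int32), np.ones(missing, np.int32)])
    return padded, padding_mask

def slice_batch(padded, padding_mask):
    real = padded.shape[0] - int(padding_mask.sum())
    assert not padding_mask[:real].any()   # order must be preserved
    return padded[:real]

x = np.arange(6, dtype=np.float32).reshape(3, 2)
padded, mask = pad_batch(x, batch_size=8)
assert np.array_equal(slice_batch(padded, mask), x)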
By that, the sliced padding mask should have all 0's.","# If this assertion failed, # the slice logic here would not hold.","sliced_padding_mask","=","slice_single_tensor","(","padding_mask",")","assert_padding_mask","=","math_ops",".","equal","(","math_ops",".","reduce_sum","(","sliced_padding_mask",")",",","0",")","with","ops",".","control_dependencies","(","[","assert_padding_mask","]",")",":","should_stop","=","_StopSignals",".","should_stop","(","_StopSignals",".","as_scalar_stopping_signal","(","signals",")",")","is_full_batch","=","math_ops",".","equal","(","math_ops",".","reduce_sum","(","padding_mask",")",",","0",")","def","slice_fn","(","tensor",")",":","# If the current batch is full batch or part of stopping signals, we do","# not need to slice to save performance.","return","control_flow_ops",".","cond","(","math_ops",".","logical_or","(","should_stop",",","is_full_batch",")",",","(","lambda",":","verify_batch_size","(","tensor",")",")",",","(","lambda",":","slice_single_tensor","(","tensor",")",")",")","return","nest",".","map_structure","(","slice_fn",",","tensor_or_dict",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/tpu_estimator.py#L3389-L3428"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/prepro_utils.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 
3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/prepro_utils.py#L14-L34"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/squad_utils.py","language":"python","identifier":"normalize_answer","parameters":"(s)","argument_list":"","return_statement":"return white_space_fix(remove_articles(remove_punc(lower(s))))","docstring":"Lower text and remove punctuation, articles and extra whitespace.","docstring_summary":"Lower text and remove punctuation, articles and extra whitespace.","docstring_tokens":["Lower","text","and","remove","punctuation","articles","and","extra","whitespace","."],"function":"def normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n def white_space_fix(text):\n return ' '.join(text.split())\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n def lower(text):\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))","function_tokens":["def","normalize_answer","(","s",")",":","def","remove_articles","(","text",")",":","regex","=","re",".","compile","(","r'\\b(a|an|the)\\b'",",","re",".","UNICODE",")","return","re",".","sub","(","regex",",","' '",",","text",")","def","white_space_fix","(","text",")",":","return","' '",".","join","(","text",".","split","(",")",")","def","remove_punc","(","text",")",":","exclude","=","set","(","string",".","punctuation",")","return","''",".","join","(","ch","for","ch","in","text","if","ch","not","in","exclude",")","def","lower","(","text",")",":","return","text",".","lower","(",")","return","white_space_fix","(","remove_articles","(","remove_punc","(","lower","(","s",")",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/squad_utils.py#L45-L57"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/function_builder.py","language":"python","identifier":"construct_scalar_host_call","parameters":"(\n monitor_dict,\n model_dir,\n prefix=\"\",\n reduce_fn=None)","argument_list":"","return_statement":"return host_call_fn, [global_step_tensor] + other_tensors","docstring":"Construct host calls to monitor training progress on TPUs.","docstring_summary":"Construct host calls to monitor training progress on TPUs.","docstring_tokens":["Construct","host","calls","to","monitor","training","progress","on","TPUs","."],"function":"def construct_scalar_host_call(\n monitor_dict,\n model_dir,\n prefix=\"\",\n reduce_fn=None):\n \"\"\"\n Construct host calls to monitor training progress on TPUs.\n \"\"\"\n\n metric_names = list(monitor_dict.keys())\n\n def host_call_fn(global_step, *args):\n \"\"\"actual host call function.\"\"\"\n step = global_step[0]\n with tf.contrib.summary.create_file_writer(\n logdir=model_dir, filename_suffix=\".host_call\").as_default():\n with tf.contrib.summary.always_record_summaries():\n for i, name in enumerate(metric_names):\n if reduce_fn is None:\n scalar = args[i][0]\n else:\n scalar = reduce_fn(args[i])\n with tf.contrib.summary.record_summaries_every_n_global_steps(\n 100, global_step=step):\n tf.contrib.summary.scalar(prefix + name, scalar, step=step)\n\n return 
tf.contrib.summary.all_summary_ops()\n\n global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1])\n other_tensors = [tf.reshape(monitor_dict[key], [1]) for key in metric_names]\n\n return host_call_fn, [global_step_tensor] + other_tensors","function_tokens":["def","construct_scalar_host_call","(","monitor_dict",",","model_dir",",","prefix","=","\"\"",",","reduce_fn","=","None",")",":","metric_names","=","list","(","monitor_dict",".","keys","(",")",")","def","host_call_fn","(","global_step",",","*","args",")",":","\"\"\"actual host call function.\"\"\"","step","=","global_step","[","0","]","with","tf",".","contrib",".","summary",".","create_file_writer","(","logdir","=","model_dir",",","filename_suffix","=","\".host_call\"",")",".","as_default","(",")",":","with","tf",".","contrib",".","summary",".","always_record_summaries","(",")",":","for","i",",","name","in","enumerate","(","metric_names",")",":","if","reduce_fn","is","None",":","scalar","=","args","[","i","]","[","0","]","else",":","scalar","=","reduce_fn","(","args","[","i","]",")","with","tf",".","contrib",".","summary",".","record_summaries_every_n_global_steps","(","100",",","global_step","=","step",")",":","tf",".","contrib",".","summary",".","scalar","(","prefix","+","name",",","scalar",",","step","=","step",")","return","tf",".","contrib",".","summary",".","all_summary_ops","(",")","global_step_tensor","=","tf",".","reshape","(","tf",".","train",".","get_or_create_global_step","(",")",",","[","1","]",")","other_tensors","=","[","tf",".","reshape","(","monitor_dict","[","key","]",",","[","1","]",")","for","key","in","metric_names","]","return","host_call_fn",",","[","global_step_tensor","]","+","other_tensors"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/function_builder.py#L13-L44"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/function_builder.py","language":"python","identifier":"two_stream_loss","parameters":"(FLAGS, features, labels, mems, is_training)","argument_list":"","return_statement":"return total_loss, new_mems, monitor_dict","docstring":"Pretraining loss with two-stream attention Transformer-XL.","docstring_summary":"Pretraining loss with two-stream attention Transformer-XL.","docstring_tokens":["Pretraining","loss","with","two","-","stream","attention","Transformer","-","XL","."],"function":"def two_stream_loss(FLAGS, features, labels, mems, is_training):\n \"\"\"Pretraining loss with two-stream attention Transformer-XL.\"\"\"\n\n #### Unpack input\n mem_name = \"mems\"\n mems = mems.get(mem_name, None)\n\n inp_k = tf.transpose(features[\"input_k\"], [1, 0])\n inp_q = tf.transpose(features[\"input_q\"], [1, 0])\n\n seg_id = tf.transpose(features[\"seg_id\"], [1, 0])\n\n inp_mask = None\n perm_mask = tf.transpose(features[\"perm_mask\"], [1, 2, 0])\n\n if FLAGS.num_predict is not None:\n # [num_predict x tgt_len x bsz]\n target_mapping = tf.transpose(features[\"target_mapping\"], [1, 2, 0])\n else:\n target_mapping = None\n\n # target for LM loss\n tgt = tf.transpose(features[\"target\"], [1, 0])\n\n # target mask for LM loss\n tgt_mask = tf.transpose(features[\"target_mask\"], [1, 0])\n\n # construct xlnet config and save to model_dir\n xlnet_config = xlnet.XLNetConfig(FLAGS=FLAGS)\n xlnet_config.to_json(os.path.join(FLAGS.model_dir, \"config.json\"))\n\n # construct run config from FLAGS\n run_config = xlnet.create_run_config(is_training, False, 
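`construct_scalar_host_call` packs every monitored scalar (and the global step) as a shape-`[1]` tensor before the outfeed and indexes it back with `[0]` on the host. A NumPy sketch of that packing convention (function names are illustrative):

import numpy as np

def pack_for_host_call(global_step, monitor_dict):
    names = list(monitor_dict)
    tensors = [np.reshape(global_step, [1])]
    tensors += [np.reshape(monitor_dict[name], [1]) for name in names]
    return names, tensors

def host_call_fn(names, global_step, *args):
    step = int(global_step[0])                     # unpack with [0]
    return step, {name: float(args[i][0]) for i, name in enumerate(names)}

names, tensors = pack_for_host_call(100, {"loss": 0.25})
assert host_call_fn(names, *tensors) == (100, {"loss": 0.25})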
FLAGS)\n\n xlnet_model = xlnet.XLNetModel(\n xlnet_config=xlnet_config,\n run_config=run_config,\n input_ids=inp_k,\n seg_ids=seg_id,\n input_mask=inp_mask,\n mems=mems,\n perm_mask=perm_mask,\n target_mapping=target_mapping,\n inp_q=inp_q)\n\n output = xlnet_model.get_sequence_output()\n new_mems = {mem_name: xlnet_model.get_new_memory()}\n lookup_table = xlnet_model.get_embedding_table()\n\n initializer = xlnet_model.get_initializer()\n\n with tf.variable_scope(\"model\", reuse=tf.AUTO_REUSE):\n # LM loss\n lm_loss = modeling.lm_loss(\n hidden=output,\n target=tgt,\n n_token=xlnet_config.n_token,\n d_model=xlnet_config.d_model,\n initializer=initializer,\n lookup_table=lookup_table,\n tie_weight=True,\n bi_data=run_config.bi_data,\n use_tpu=run_config.use_tpu)\n\n #### Quantity to monitor\n monitor_dict = {}\n\n if FLAGS.use_bfloat16:\n tgt_mask = tf.cast(tgt_mask, tf.float32)\n lm_loss = tf.cast(lm_loss, tf.float32)\n\n total_loss = tf.reduce_sum(lm_loss * tgt_mask) \/ tf.reduce_sum(tgt_mask)\n monitor_dict[\"total_loss\"] = total_loss\n\n return total_loss, new_mems, monitor_dict","function_tokens":["def","two_stream_loss","(","FLAGS",",","features",",","labels",",","mems",",","is_training",")",":","#### Unpack input","mem_name","=","\"mems\"","mems","=","mems",".","get","(","mem_name",",","None",")","inp_k","=","tf",".","transpose","(","features","[","\"input_k\"","]",",","[","1",",","0","]",")","inp_q","=","tf",".","transpose","(","features","[","\"input_q\"","]",",","[","1",",","0","]",")","seg_id","=","tf",".","transpose","(","features","[","\"seg_id\"","]",",","[","1",",","0","]",")","inp_mask","=","None","perm_mask","=","tf",".","transpose","(","features","[","\"perm_mask\"","]",",","[","1",",","2",",","0","]",")","if","FLAGS",".","num_predict","is","not","None",":","# [num_predict x tgt_len x bsz]","target_mapping","=","tf",".","transpose","(","features","[","\"target_mapping\"","]",",","[","1",",","2",",","0","]",")","else",":","target_mapping","=","None","# target for LM loss","tgt","=","tf",".","transpose","(","features","[","\"target\"","]",",","[","1",",","0","]",")","# target mask for LM loss","tgt_mask","=","tf",".","transpose","(","features","[","\"target_mask\"","]",",","[","1",",","0","]",")","# construct xlnet config and save to model_dir","xlnet_config","=","xlnet",".","XLNetConfig","(","FLAGS","=","FLAGS",")","xlnet_config",".","to_json","(","os",".","path",".","join","(","FLAGS",".","model_dir",",","\"config.json\"",")",")","# construct run config from FLAGS","run_config","=","xlnet",".","create_run_config","(","is_training",",","False",",","FLAGS",")","xlnet_model","=","xlnet",".","XLNetModel","(","xlnet_config","=","xlnet_config",",","run_config","=","run_config",",","input_ids","=","inp_k",",","seg_ids","=","seg_id",",","input_mask","=","inp_mask",",","mems","=","mems",",","perm_mask","=","perm_mask",",","target_mapping","=","target_mapping",",","inp_q","=","inp_q",")","output","=","xlnet_model",".","get_sequence_output","(",")","new_mems","=","{","mem_name",":","xlnet_model",".","get_new_memory","(",")","}","lookup_table","=","xlnet_model",".","get_embedding_table","(",")","initializer","=","xlnet_model",".","get_initializer","(",")","with","tf",".","variable_scope","(","\"model\"",",","reuse","=","tf",".","AUTO_REUSE",")",":","# LM 
loss","lm_loss","=","modeling",".","lm_loss","(","hidden","=","output",",","target","=","tgt",",","n_token","=","xlnet_config",".","n_token",",","d_model","=","xlnet_config",".","d_model",",","initializer","=","initializer",",","lookup_table","=","lookup_table",",","tie_weight","=","True",",","bi_data","=","run_config",".","bi_data",",","use_tpu","=","run_config",".","use_tpu",")","#### Quantity to monitor","monitor_dict","=","{","}","if","FLAGS",".","use_bfloat16",":","tgt_mask","=","tf",".","cast","(","tgt_mask",",","tf",".","float32",")","lm_loss","=","tf",".","cast","(","lm_loss",",","tf",".","float32",")","total_loss","=","tf",".","reduce_sum","(","lm_loss","*","tgt_mask",")","\/","tf",".","reduce_sum","(","tgt_mask",")","monitor_dict","[","\"total_loss\"","]","=","total_loss","return","total_loss",",","new_mems",",","monitor_dict"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/function_builder.py#L47-L121"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/function_builder.py","language":"python","identifier":"get_loss","parameters":"(FLAGS, features, labels, mems, is_training)","argument_list":"","return_statement":"","docstring":"Pretraining loss with two-stream attention Transformer-XL.","docstring_summary":"Pretraining loss with two-stream attention Transformer-XL.","docstring_tokens":["Pretraining","loss","with","two","-","stream","attention","Transformer","-","XL","."],"function":"def get_loss(FLAGS, features, labels, mems, is_training):\n \"\"\"Pretraining loss with two-stream attention Transformer-XL.\"\"\"\n if FLAGS.use_bfloat16:\n with tf.tpu.bfloat16_scope():\n return two_stream_loss(FLAGS, features, labels, mems, is_training)\n else:\n return two_stream_loss(FLAGS, features, labels, mems, is_training)","function_tokens":["def","get_loss","(","FLAGS",",","features",",","labels",",","mems",",","is_training",")",":","if","FLAGS",".","use_bfloat16",":","with","tf",".","tpu",".","bfloat16_scope","(",")",":","return","two_stream_loss","(","FLAGS",",","features",",","labels",",","mems",",","is_training",")","else",":","return","two_stream_loss","(","FLAGS",",","features",",","labels",",","mems",",","is_training",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/function_builder.py#L124-L130"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/function_builder.py","language":"python","identifier":"get_classification_loss","parameters":"(\n FLAGS, features, n_class, is_training)","argument_list":"","return_statement":"","docstring":"Loss for downstream classification tasks.","docstring_summary":"Loss for downstream classification tasks.","docstring_tokens":["Loss","for","downstream","classification","tasks","."],"function":"def get_classification_loss(\n FLAGS, features, n_class, is_training):\n \"\"\"Loss for downstream classification tasks.\"\"\"\n\n bsz_per_core = tf.shape(features[\"input_ids\"])[0]\n\n inp = tf.transpose(features[\"input_ids\"], [1, 0])\n seg_id = tf.transpose(features[\"segment_ids\"], [1, 0])\n inp_mask = tf.transpose(features[\"input_mask\"], [1, 0])\n label = tf.reshape(features[\"label_ids\"], [bsz_per_core])\n\n xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)\n run_config = xlnet.create_run_config(is_training, True, FLAGS)\n\n xlnet_model = 
xlnet.XLNetModel(\n xlnet_config=xlnet_config,\n run_config=run_config,\n input_ids=inp,\n seg_ids=seg_id,\n input_mask=inp_mask)\n\n summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj)\n\n with tf.variable_scope(\"model\", reuse=tf.AUTO_REUSE):\n\n if FLAGS.cls_scope is not None and FLAGS.cls_scope:\n cls_scope = \"classification_{}\".format(FLAGS.cls_scope)\n else:\n cls_scope = \"classification_{}\".format(FLAGS.task_name.lower())\n\n per_example_loss, logits = modeling.classification_loss(\n hidden=summary,\n labels=label,\n n_class=n_class,\n initializer=xlnet_model.get_initializer(),\n scope=cls_scope,\n return_logits=True)\n\n total_loss = tf.reduce_mean(per_example_loss)\n\n return total_loss, per_example_loss, logits","function_tokens":["def","get_classification_loss","(","FLAGS",",","features",",","n_class",",","is_training",")",":","bsz_per_core","=","tf",".","shape","(","features","[","\"input_ids\"","]",")","[","0","]","inp","=","tf",".","transpose","(","features","[","\"input_ids\"","]",",","[","1",",","0","]",")","seg_id","=","tf",".","transpose","(","features","[","\"segment_ids\"","]",",","[","1",",","0","]",")","inp_mask","=","tf",".","transpose","(","features","[","\"input_mask\"","]",",","[","1",",","0","]",")","label","=","tf",".","reshape","(","features","[","\"label_ids\"","]",",","[","bsz_per_core","]",")","xlnet_config","=","xlnet",".","XLNetConfig","(","json_path","=","FLAGS",".","model_config_path",")","run_config","=","xlnet",".","create_run_config","(","is_training",",","True",",","FLAGS",")","xlnet_model","=","xlnet",".","XLNetModel","(","xlnet_config","=","xlnet_config",",","run_config","=","run_config",",","input_ids","=","inp",",","seg_ids","=","seg_id",",","input_mask","=","inp_mask",")","summary","=","xlnet_model",".","get_pooled_out","(","FLAGS",".","summary_type",",","FLAGS",".","use_summ_proj",")","with","tf",".","variable_scope","(","\"model\"",",","reuse","=","tf",".","AUTO_REUSE",")",":","if","FLAGS",".","cls_scope","is","not","None","and","FLAGS",".","cls_scope",":","cls_scope","=","\"classification_{}\"",".","format","(","FLAGS",".","cls_scope",")","else",":","cls_scope","=","\"classification_{}\"",".","format","(","FLAGS",".","task_name",".","lower","(",")",")","per_example_loss",",","logits","=","modeling",".","classification_loss","(","hidden","=","summary",",","labels","=","label",",","n_class","=","n_class",",","initializer","=","xlnet_model",".","get_initializer","(",")",",","scope","=","cls_scope",",","return_logits","=","True",")","total_loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","total_loss",",","per_example_loss",",","logits"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/function_builder.py#L133-L173"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/function_builder.py","language":"python","identifier":"get_regression_loss","parameters":"(\n FLAGS, features, is_training)","argument_list":"","return_statement":"","docstring":"Loss for downstream regression tasks.","docstring_summary":"Loss for downstream regression tasks.","docstring_tokens":["Loss","for","downstream","regression","tasks","."],"function":"def get_regression_loss(\n FLAGS, features, is_training):\n \"\"\"Loss for downstream regression tasks.\"\"\"\n\n bsz_per_core = tf.shape(features[\"input_ids\"])[0]\n\n inp = tf.transpose(features[\"input_ids\"], [1, 0])\n seg_id = 
tf.transpose(features[\"segment_ids\"], [1, 0])\n inp_mask = tf.transpose(features[\"input_mask\"], [1, 0])\n label = tf.reshape(features[\"label_ids\"], [bsz_per_core])\n\n xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)\n run_config = xlnet.create_run_config(is_training, True, FLAGS)\n\n xlnet_model = xlnet.XLNetModel(\n xlnet_config=xlnet_config,\n run_config=run_config,\n input_ids=inp,\n seg_ids=seg_id,\n input_mask=inp_mask)\n\n summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj)\n\n with tf.variable_scope(\"model\", reuse=tf.AUTO_REUSE):\n per_example_loss, logits = modeling.regression_loss(\n hidden=summary,\n labels=label,\n initializer=xlnet_model.get_initializer(),\n scope=\"regression_{}\".format(FLAGS.task_name.lower()),\n return_logits=True)\n\n total_loss = tf.reduce_mean(per_example_loss)\n\n return total_loss, per_example_loss, logits","function_tokens":["def","get_regression_loss","(","FLAGS",",","features",",","is_training",")",":","bsz_per_core","=","tf",".","shape","(","features","[","\"input_ids\"","]",")","[","0","]","inp","=","tf",".","transpose","(","features","[","\"input_ids\"","]",",","[","1",",","0","]",")","seg_id","=","tf",".","transpose","(","features","[","\"segment_ids\"","]",",","[","1",",","0","]",")","inp_mask","=","tf",".","transpose","(","features","[","\"input_mask\"","]",",","[","1",",","0","]",")","label","=","tf",".","reshape","(","features","[","\"label_ids\"","]",",","[","bsz_per_core","]",")","xlnet_config","=","xlnet",".","XLNetConfig","(","json_path","=","FLAGS",".","model_config_path",")","run_config","=","xlnet",".","create_run_config","(","is_training",",","True",",","FLAGS",")","xlnet_model","=","xlnet",".","XLNetModel","(","xlnet_config","=","xlnet_config",",","run_config","=","run_config",",","input_ids","=","inp",",","seg_ids","=","seg_id",",","input_mask","=","inp_mask",")","summary","=","xlnet_model",".","get_pooled_out","(","FLAGS",".","summary_type",",","FLAGS",".","use_summ_proj",")","with","tf",".","variable_scope","(","\"model\"",",","reuse","=","tf",".","AUTO_REUSE",")",":","per_example_loss",",","logits","=","modeling",".","regression_loss","(","hidden","=","summary",",","labels","=","label",",","initializer","=","xlnet_model",".","get_initializer","(",")",",","scope","=","\"regression_{}\"",".","format","(","FLAGS",".","task_name",".","lower","(",")",")",",","return_logits","=","True",")","total_loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","total_loss",",","per_example_loss",",","logits"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/function_builder.py#L176-L209"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/function_builder.py","language":"python","identifier":"get_qa_outputs","parameters":"(FLAGS, features, is_training)","argument_list":"","return_statement":"return return_dict","docstring":"Loss for downstream span-extraction QA tasks such as SQuAD.","docstring_summary":"Loss for downstream span-extraction QA tasks such as SQuAD.","docstring_tokens":["Loss","for","downstream","span","-","extraction","QA","tasks","such","as","SQuAD","."],"function":"def get_qa_outputs(FLAGS, features, is_training):\n \"\"\"Loss for downstream span-extraction QA tasks such as SQuAD.\"\"\"\n\n inp = tf.transpose(features[\"input_ids\"], [1, 0])\n seg_id = tf.transpose(features[\"segment_ids\"], [1, 0])\n inp_mask = 
tf.transpose(features[\"input_mask\"], [1, 0])\n\n seq_len = tf.shape(inp)[0]\n\n xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)\n run_config = xlnet.create_run_config(is_training, True, FLAGS)\n\n xlnet_model = xlnet.XLNetModel(\n xlnet_config=xlnet_config,\n run_config=run_config,\n input_ids=inp,\n seg_ids=seg_id,\n input_mask=inp_mask)\n output = xlnet_model.get_sequence_output()\n initializer = xlnet_model.get_initializer()\n\n return_dict = {}\n\n # invalid position mask such as query and special symbols (PAD, SEP, CLS)\n p_mask = features[\"p_mask\"]\n\n # logit of the start position\n with tf.variable_scope(\"start_logits\"):\n start_logits = tf.layers.dense(\n output,\n 1,\n kernel_initializer=initializer)\n start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0])\n start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask\n start_log_probs = tf.nn.log_softmax(start_logits_masked, -1)\n\n # logit of the end position\n with tf.variable_scope(\"end_logits\"):\n if is_training:\n # during training, compute the end logits based on the\n # ground truth of the start position\n\n start_positions = tf.reshape(features[\"start_positions\"], [-1])\n start_index = tf.one_hot(start_positions, depth=seq_len, axis=-1,\n dtype=tf.float32)\n start_features = tf.einsum(\"lbh,bl->bh\", output, start_index)\n start_features = tf.tile(start_features[None], [seq_len, 1, 1])\n end_logits = tf.layers.dense(\n tf.concat([output, start_features], axis=-1), xlnet_config.d_model,\n kernel_initializer=initializer, activation=tf.tanh, name=\"dense_0\")\n end_logits = tf.contrib.layers.layer_norm(\n end_logits, begin_norm_axis=-1)\n\n end_logits = tf.layers.dense(\n end_logits, 1,\n kernel_initializer=initializer,\n name=\"dense_1\")\n end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0])\n end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask\n end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)\n else:\n # during inference, compute the end logits based on beam search\n\n start_top_log_probs, start_top_index = tf.nn.top_k(\n start_log_probs, k=FLAGS.start_n_top)\n start_index = tf.one_hot(start_top_index,\n depth=seq_len, axis=-1, dtype=tf.float32)\n start_features = tf.einsum(\"lbh,bkl->bkh\", output, start_index)\n end_input = tf.tile(output[:, :, None],\n [1, 1, FLAGS.start_n_top, 1])\n start_features = tf.tile(start_features[None],\n [seq_len, 1, 1, 1])\n end_input = tf.concat([end_input, start_features], axis=-1)\n end_logits = tf.layers.dense(\n end_input,\n xlnet_config.d_model,\n kernel_initializer=initializer,\n activation=tf.tanh,\n name=\"dense_0\")\n end_logits = tf.contrib.layers.layer_norm(end_logits,\n begin_norm_axis=-1)\n end_logits = tf.layers.dense(\n end_logits,\n 1,\n kernel_initializer=initializer,\n name=\"dense_1\")\n end_logits = tf.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top])\n end_logits = tf.transpose(end_logits, [1, 2, 0])\n end_logits_masked = end_logits * (\n 1 - p_mask[:, None]) - 1e30 * p_mask[:, None]\n end_log_probs = tf.nn.log_softmax(end_logits_masked, -1)\n end_top_log_probs, end_top_index = tf.nn.top_k(\n end_log_probs, k=FLAGS.end_n_top)\n end_top_log_probs = tf.reshape(\n end_top_log_probs,\n [-1, FLAGS.start_n_top * FLAGS.end_n_top])\n end_top_index = tf.reshape(\n end_top_index,\n [-1, FLAGS.start_n_top * FLAGS.end_n_top])\n\n if is_training:\n return_dict[\"start_log_probs\"] = start_log_probs\n return_dict[\"end_log_probs\"] = end_log_probs\n else:\n return_dict[\"start_top_log_probs\"] = 
start_top_log_probs\n return_dict[\"start_top_index\"] = start_top_index\n return_dict[\"end_top_log_probs\"] = end_top_log_probs\n return_dict[\"end_top_index\"] = end_top_index\n\n return return_dict","function_tokens":["def","get_qa_outputs","(","FLAGS",",","features",",","is_training",")",":","inp","=","tf",".","transpose","(","features","[","\"input_ids\"","]",",","[","1",",","0","]",")","seg_id","=","tf",".","transpose","(","features","[","\"segment_ids\"","]",",","[","1",",","0","]",")","inp_mask","=","tf",".","transpose","(","features","[","\"input_mask\"","]",",","[","1",",","0","]",")","seq_len","=","tf",".","shape","(","inp",")","[","0","]","xlnet_config","=","xlnet",".","XLNetConfig","(","json_path","=","FLAGS",".","model_config_path",")","run_config","=","xlnet",".","create_run_config","(","is_training",",","True",",","FLAGS",")","xlnet_model","=","xlnet",".","XLNetModel","(","xlnet_config","=","xlnet_config",",","run_config","=","run_config",",","input_ids","=","inp",",","seg_ids","=","seg_id",",","input_mask","=","inp_mask",")","output","=","xlnet_model",".","get_sequence_output","(",")","initializer","=","xlnet_model",".","get_initializer","(",")","return_dict","=","{","}","# invalid position mask such as query and special symbols (PAD, SEP, CLS)","p_mask","=","features","[","\"p_mask\"","]","# logit of the start position","with","tf",".","variable_scope","(","\"start_logits\"",")",":","start_logits","=","tf",".","layers",".","dense","(","output",",","1",",","kernel_initializer","=","initializer",")","start_logits","=","tf",".","transpose","(","tf",".","squeeze","(","start_logits",",","-","1",")",",","[","1",",","0","]",")","start_logits_masked","=","start_logits","*","(","1","-","p_mask",")","-","1e30","*","p_mask","start_log_probs","=","tf",".","nn",".","log_softmax","(","start_logits_masked",",","-","1",")","# logit of the end position","with","tf",".","variable_scope","(","\"end_logits\"",")",":","if","is_training",":","# during training, compute the end logits based on the","# ground truth of the start position","start_positions","=","tf",".","reshape","(","features","[","\"start_positions\"","]",",","[","-","1","]",")","start_index","=","tf",".","one_hot","(","start_positions",",","depth","=","seq_len",",","axis","=","-","1",",","dtype","=","tf",".","float32",")","start_features","=","tf",".","einsum","(","\"lbh,bl->bh\"",",","output",",","start_index",")","start_features","=","tf",".","tile","(","start_features","[","None","]",",","[","seq_len",",","1",",","1","]",")","end_logits","=","tf",".","layers",".","dense","(","tf",".","concat","(","[","output",",","start_features","]",",","axis","=","-","1",")",",","xlnet_config",".","d_model",",","kernel_initializer","=","initializer",",","activation","=","tf",".","tanh",",","name","=","\"dense_0\"",")","end_logits","=","tf",".","contrib",".","layers",".","layer_norm","(","end_logits",",","begin_norm_axis","=","-","1",")","end_logits","=","tf",".","layers",".","dense","(","end_logits",",","1",",","kernel_initializer","=","initializer",",","name","=","\"dense_1\"",")","end_logits","=","tf",".","transpose","(","tf",".","squeeze","(","end_logits",",","-","1",")",",","[","1",",","0","]",")","end_logits_masked","=","end_logits","*","(","1","-","p_mask",")","-","1e30","*","p_mask","end_log_probs","=","tf",".","nn",".","log_softmax","(","end_logits_masked",",","-","1",")","else",":","# during inference, compute the end logits based on beam 
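At inference time `get_qa_outputs` keeps `start_n_top` start candidates and `end_n_top` end candidates per start, then flattens to `[bsz, start_n_top * end_n_top]` so each column is one (start, end) pair. A NumPy sketch of that beam layout; combining the scores by addition here is a simplification of what the downstream SQuAD postprocessing does:

import numpy as np

def top_k(scores, k):
    idx = np.argsort(scores, axis=-1)[..., ::-1][..., :k]
    return np.take_along_axis(scores, idx, axis=-1), idx

rng = np.random.default_rng(0)
start_log_probs = rng.random((1, 5))           # [bsz=1, seq_len=5]
start_top, _ = top_k(start_log_probs, k=2)     # start_n_top = 2
end_log_probs = rng.random((1, 2, 5))          # [bsz, start_n_top, seq_len]
end_top, _ = top_k(end_log_probs, k=3)         # end_n_top = 3
pair_scores = start_top[:, :, None] + end_top  # [bsz, 2, 3]
flat = pair_scores.reshape(-1, 2 * 3)          # one column per (start, end)
assert flat.shape == (1, 6)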
search","start_top_log_probs",",","start_top_index","=","tf",".","nn",".","top_k","(","start_log_probs",",","k","=","FLAGS",".","start_n_top",")","start_index","=","tf",".","one_hot","(","start_top_index",",","depth","=","seq_len",",","axis","=","-","1",",","dtype","=","tf",".","float32",")","start_features","=","tf",".","einsum","(","\"lbh,bkl->bkh\"",",","output",",","start_index",")","end_input","=","tf",".","tile","(","output","[",":",",",":",",","None","]",",","[","1",",","1",",","FLAGS",".","start_n_top",",","1","]",")","start_features","=","tf",".","tile","(","start_features","[","None","]",",","[","seq_len",",","1",",","1",",","1","]",")","end_input","=","tf",".","concat","(","[","end_input",",","start_features","]",",","axis","=","-","1",")","end_logits","=","tf",".","layers",".","dense","(","end_input",",","xlnet_config",".","d_model",",","kernel_initializer","=","initializer",",","activation","=","tf",".","tanh",",","name","=","\"dense_0\"",")","end_logits","=","tf",".","contrib",".","layers",".","layer_norm","(","end_logits",",","begin_norm_axis","=","-","1",")","end_logits","=","tf",".","layers",".","dense","(","end_logits",",","1",",","kernel_initializer","=","initializer",",","name","=","\"dense_1\"",")","end_logits","=","tf",".","reshape","(","end_logits",",","[","seq_len",",","-","1",",","FLAGS",".","start_n_top","]",")","end_logits","=","tf",".","transpose","(","end_logits",",","[","1",",","2",",","0","]",")","end_logits_masked","=","end_logits","*","(","1","-","p_mask","[",":",",","None","]",")","-","1e30","*","p_mask","[",":",",","None","]","end_log_probs","=","tf",".","nn",".","log_softmax","(","end_logits_masked",",","-","1",")","end_top_log_probs",",","end_top_index","=","tf",".","nn",".","top_k","(","end_log_probs",",","k","=","FLAGS",".","end_n_top",")","end_top_log_probs","=","tf",".","reshape","(","end_top_log_probs",",","[","-","1",",","FLAGS",".","start_n_top","*","FLAGS",".","end_n_top","]",")","end_top_index","=","tf",".","reshape","(","end_top_index",",","[","-","1",",","FLAGS",".","start_n_top","*","FLAGS",".","end_n_top","]",")","if","is_training",":","return_dict","[","\"start_log_probs\"","]","=","start_log_probs","return_dict","[","\"end_log_probs\"","]","=","end_log_probs","else",":","return_dict","[","\"start_top_log_probs\"","]","=","start_top_log_probs","return_dict","[","\"start_top_index\"","]","=","start_top_index","return_dict","[","\"end_top_log_probs\"","]","=","end_top_log_probs","return_dict","[","\"end_top_index\"","]","=","end_top_index","return","return_dict"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/function_builder.py#L212-L321"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/xlnet\/function_builder.py","language":"python","identifier":"get_race_loss","parameters":"(FLAGS, features, is_training)","argument_list":"","return_statement":"return total_loss, per_example_loss, logits","docstring":"Loss for downstream multi-choice QA tasks such as RACE.","docstring_summary":"Loss for downstream multi-choice QA tasks such as RACE.","docstring_tokens":["Loss","for","downstream","multi","-","choice","QA","tasks","such","as","RACE","."],"function":"def get_race_loss(FLAGS, features, is_training):\n \"\"\"Loss for downstream multi-choice QA tasks such as RACE.\"\"\"\n\n bsz_per_core = tf.shape(features[\"input_ids\"])[0]\n\n def _transform_features(feature):\n out = tf.reshape(feature, [bsz_per_core, 4, -1])\n 
out = tf.transpose(out, [2, 0, 1])\n out = tf.reshape(out, [-1, bsz_per_core * 4])\n return out\n\n inp = _transform_features(features[\"input_ids\"])\n seg_id = _transform_features(features[\"segment_ids\"])\n inp_mask = _transform_features(features[\"input_mask\"])\n label = tf.reshape(features[\"label_ids\"], [bsz_per_core])\n\n xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path)\n run_config = xlnet.create_run_config(is_training, True, FLAGS)\n\n xlnet_model = xlnet.XLNetModel(\n xlnet_config=xlnet_config,\n run_config=run_config,\n input_ids=inp,\n seg_ids=seg_id,\n input_mask=inp_mask)\n summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj)\n\n with tf.variable_scope(\"logits\"):\n logits = tf.layers.dense(summary, 1,\n kernel_initializer=xlnet_model.get_initializer())\n logits = tf.reshape(logits, [bsz_per_core, 4])\n\n one_hot_target = tf.one_hot(label, 4)\n per_example_loss = -tf.reduce_sum(\n tf.nn.log_softmax(logits) * one_hot_target, -1)\n total_loss = tf.reduce_mean(per_example_loss)\n\n return total_loss, per_example_loss, logits","function_tokens":["def","get_race_loss","(","FLAGS",",","features",",","is_training",")",":","bsz_per_core","=","tf",".","shape","(","features","[","\"input_ids\"","]",")","[","0","]","def","_transform_features","(","feature",")",":","out","=","tf",".","reshape","(","feature",",","[","bsz_per_core",",","4",",","-","1","]",")","out","=","tf",".","transpose","(","out",",","[","2",",","0",",","1","]",")","out","=","tf",".","reshape","(","out",",","[","-","1",",","bsz_per_core","*","4","]",")","return","out","inp","=","_transform_features","(","features","[","\"input_ids\"","]",")","seg_id","=","_transform_features","(","features","[","\"segment_ids\"","]",")","inp_mask","=","_transform_features","(","features","[","\"input_mask\"","]",")","label","=","tf",".","reshape","(","features","[","\"label_ids\"","]",",","[","bsz_per_core","]",")","xlnet_config","=","xlnet",".","XLNetConfig","(","json_path","=","FLAGS",".","model_config_path",")","run_config","=","xlnet",".","create_run_config","(","is_training",",","True",",","FLAGS",")","xlnet_model","=","xlnet",".","XLNetModel","(","xlnet_config","=","xlnet_config",",","run_config","=","run_config",",","input_ids","=","inp",",","seg_ids","=","seg_id",",","input_mask","=","inp_mask",")","summary","=","xlnet_model",".","get_pooled_out","(","FLAGS",".","summary_type",",","FLAGS",".","use_summ_proj",")","with","tf",".","variable_scope","(","\"logits\"",")",":","logits","=","tf",".","layers",".","dense","(","summary",",","1",",","kernel_initializer","=","xlnet_model",".","get_initializer","(",")",")","logits","=","tf",".","reshape","(","logits",",","[","bsz_per_core",",","4","]",")","one_hot_target","=","tf",".","one_hot","(","label",",","4",")","per_example_loss","=","-","tf",".","reduce_sum","(","tf",".","nn",".","log_softmax","(","logits",")","*","one_hot_target",",","-","1",")","total_loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","total_loss",",","per_example_loss",",","logits"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/xlnet\/function_builder.py#L324-L361"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_pretraining.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n 
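The `get_race_loss` record above scores each of the 4 answer choices with a single dense unit, reshapes the scores to `[bsz_per_core, 4]`, and takes a softmax cross-entropy against the gold choice. A minimal NumPy sketch of that loss, with random logits standing in for the pooled-summary scores:

```python
import numpy as np

def log_softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=axis, keepdims=True))

rng = np.random.default_rng(0)
bsz = 3  # examples per core; each RACE example carries 4 answer choices

# Stand-in for the per-choice scalar scores that dense(summary, 1)
# produces, already reshaped to [bsz, 4].
logits = rng.normal(size=(bsz, 4))
labels = np.array([0, 2, 3])

one_hot_target = np.eye(4)[labels]               # tf.one_hot(label, 4)
per_example_loss = -(log_softmax(logits) * one_hot_target).sum(-1)
total_loss = per_example_loss.mean()
print(per_example_loss, total_loss)
```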
use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = 
tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","masked_lm_positions","=","features","[","\"masked_lm_positions\"","]","masked_lm_ids","=","features","[","\"masked_lm_ids\"","]","masked_lm_weights","=","features","[","\"masked_lm_weights\"","]","next_sentence_labels","=","features","[","\"next_sentence_labels\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","(","masked_lm_loss",",","masked_lm_example_loss",",","masked_lm_log_probs",")","=","get_masked_lm_output","(","bert_config",",","model",".","get_sequence_output","(",")",",","model",".","get_embedding_table","(",")",",","masked_lm_positions",",","masked_lm_ids",",","masked_lm_weights",")","(","next_sentence_loss",",","next_sentence_example_loss",",","next_sentence_log_probs",")","=","get_next_sentence_output","(","bert_config",",","model",".","get_pooled_output","(",")",",","next_sentence_labels",")","total_loss","=","masked_lm_loss","+","next_sentence_loss","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels",")",":","\"\"\"Computes the loss and accuracy of the 
model.\"\"\"","masked_lm_log_probs","=","tf",".","reshape","(","masked_lm_log_probs",",","[","-","1",",","masked_lm_log_probs",".","shape","[","-","1","]","]",")","masked_lm_predictions","=","tf",".","argmax","(","masked_lm_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","masked_lm_example_loss","=","tf",".","reshape","(","masked_lm_example_loss",",","[","-","1","]",")","masked_lm_ids","=","tf",".","reshape","(","masked_lm_ids",",","[","-","1","]",")","masked_lm_weights","=","tf",".","reshape","(","masked_lm_weights",",","[","-","1","]",")","masked_lm_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","masked_lm_ids",",","predictions","=","masked_lm_predictions",",","weights","=","masked_lm_weights",")","masked_lm_mean_loss","=","tf",".","metrics",".","mean","(","values","=","masked_lm_example_loss",",","weights","=","masked_lm_weights",")","next_sentence_log_probs","=","tf",".","reshape","(","next_sentence_log_probs",",","[","-","1",",","next_sentence_log_probs",".","shape","[","-","1","]","]",")","next_sentence_predictions","=","tf",".","argmax","(","next_sentence_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","next_sentence_labels","=","tf",".","reshape","(","next_sentence_labels",",","[","-","1","]",")","next_sentence_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","next_sentence_labels",",","predictions","=","next_sentence_predictions",")","next_sentence_mean_loss","=","tf",".","metrics",".","mean","(","values","=","next_sentence_example_loss",")","return","{","\"masked_lm_accuracy\"",":","masked_lm_accuracy",",","\"masked_lm_loss\"",":","masked_lm_mean_loss",",","\"next_sentence_accuracy\"",":","next_sentence_accuracy",",","\"next_sentence_loss\"",":","next_sentence_mean_loss",",","}","eval_metrics","=","(","metric_fn",",","[","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and EVAL modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_pretraining.py#L109-L237"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_pretraining.py","language":"python","identifier":"get_masked_lm_output","parameters":"(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights)","argument_list":"","return_statement":"return (loss, per_example_loss, log_probs)","docstring":"Get loss and log probs for the masked LM.","docstring_summary":"Get loss and log probs for the masked LM.","docstring_tokens":["Get","loss","and","log","probs","for","the","masked","LM","."],"function":"def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls\/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with 
tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator \/ denominator\n\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_masked_lm_output","(","bert_config",",","input_tensor",",","output_weights",",","positions",",","label_ids",",","label_weights",")",":","input_tensor","=","gather_indexes","(","input_tensor",",","positions",")","with","tf",".","variable_scope","(","\"cls\/predictions\"",")",":","# We apply one more non-linear transformation before the output layer.","# This matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"transform\"",")",":","input_tensor","=","tf",".","layers",".","dense","(","input_tensor",",","units","=","bert_config",".","hidden_size",",","activation","=","modeling",".","get_activation","(","bert_config",".","hidden_act",")",",","kernel_initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","input_tensor","=","modeling",".","layer_norm","(","input_tensor",")","# The output weights are the same as the input embeddings, but there is","# an output-only bias for each token.","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","bert_config",".","vocab_size","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","label_ids","=","tf",".","reshape","(","label_ids",",","[","-","1","]",")","label_weights","=","tf",".","reshape","(","label_weights",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","label_ids",",","depth","=","bert_config",".","vocab_size",",","dtype","=","tf",".","float32",")","# The `positions` tensor might be zero-padded (if the sequence is too","# short to have the maximum number of predictions). 
The `label_weights`","# tensor has a value of 1.0 for every real prediction and 0.0 for the","# padding predictions.","per_example_loss","=","-","tf",".","reduce_sum","(","log_probs","*","one_hot_labels",",","axis","=","[","-","1","]",")","numerator","=","tf",".","reduce_sum","(","label_weights","*","per_example_loss",")","denominator","=","tf",".","reduce_sum","(","label_weights",")","+","1e-5","loss","=","numerator","\/","denominator","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_pretraining.py#L240-L282"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_pretraining.py","language":"python","identifier":"get_next_sentence_output","parameters":"(bert_config, input_tensor, labels)","argument_list":"","return_statement":"","docstring":"Get loss and log probs for the next sentence prediction.","docstring_summary":"Get loss and log probs for the next sentence prediction.","docstring_tokens":["Get","loss","and","log","probs","for","the","next","sentence","prediction","."],"function":"def get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls\/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_next_sentence_output","(","bert_config",",","input_tensor",",","labels",")",":","# Simple binary classification. Note that 0 is \"next sentence\" and 1 is","# \"random sentence\". 
This weight matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"cls\/seq_relationship\"",")",":","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","shape","=","[","2",",","bert_config",".","hidden_size","]",",","initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","labels","=","tf",".","reshape","(","labels",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","2",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_pretraining.py#L285-L305"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_pretraining.py","language":"python","identifier":"gather_indexes","parameters":"(sequence_tensor, positions)","argument_list":"","return_statement":"return output_tensor","docstring":"Gathers the vectors at the specific positions over a minibatch.","docstring_summary":"Gathers the vectors at the specific positions over a minibatch.","docstring_tokens":["Gathers","the","vectors","at","the","specific","positions","over","a","minibatch","."],"function":"def gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return 
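The `get_next_sentence_output` record above is a plain two-way classifier over the pooled `[CLS]` output, with label 0 meaning "actual next sentence" and 1 meaning "random sentence". A short NumPy sketch of the same loss; the pooled vectors and weights are random stand-ins:

```python
import numpy as np

def log_softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=axis, keepdims=True))

rng = np.random.default_rng(0)
batch, hidden = 4, 8

pooled = rng.normal(size=(batch, hidden))            # pooled [CLS] output
output_weights = rng.normal(size=(2, hidden)) * 0.02
output_bias = np.zeros(2)

logits = pooled @ output_weights.T + output_bias
log_probs = log_softmax(logits)

# Label convention from the record: 0 = next sentence, 1 = random sentence.
labels = np.array([0, 0, 1, 0])
per_example_loss = -log_probs[np.arange(batch), labels]
print(per_example_loss.mean())
```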
output_tensor","function_tokens":["def","gather_indexes","(","sequence_tensor",",","positions",")",":","sequence_shape","=","modeling",".","get_shape_list","(","sequence_tensor",",","expected_rank","=","3",")","batch_size","=","sequence_shape","[","0","]","seq_length","=","sequence_shape","[","1","]","width","=","sequence_shape","[","2","]","flat_offsets","=","tf",".","reshape","(","tf",".","range","(","0",",","batch_size",",","dtype","=","tf",".","int32",")","*","seq_length",",","[","-","1",",","1","]",")","flat_positions","=","tf",".","reshape","(","positions","+","flat_offsets",",","[","-","1","]",")","flat_sequence_tensor","=","tf",".","reshape","(","sequence_tensor",",","[","batch_size","*","seq_length",",","width","]",")","output_tensor","=","tf",".","gather","(","flat_sequence_tensor",",","flat_positions",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_pretraining.py#L308-L321"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_pretraining.py","language":"python","identifier":"input_fn_builder","parameters":"(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_files",",","max_seq_length",",","max_predictions_per_seq",",","is_training",",","num_cpu_threads","=","4",")",":","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"masked_lm_positions\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_ids\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_weights\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","float32",")",",","\"next_sentence_labels\"",":","tf",".","FixedLenFeature","(","[","1","]",",","tf",".","int64",")",",","}","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","if","is_training",":","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","tf",".","constant","(","input_files",")",")","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","len","(","input_files",")",")","# `cycle_length` is the number of parallel files that get read.","cycle_length","=","min","(","num_cpu_threads",",","len","(","input_files",")",")","# `sloppy` mode means that the interleaving is not exact. This adds","# even more randomness to the training pipeline.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","parallel_interleave","(","tf",".","data",".","TFRecordDataset",",","sloppy","=","is_training",",","cycle_length","=","cycle_length",")",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","else",":","d","=","tf",".","data",".","TFRecordDataset","(","input_files",")","# Since we evaluate for a fixed number of steps we don't want to encounter","# out-of-range exceptions.","d","=","d",".","repeat","(",")","# We must `drop_remainder` on training because the TPU requires fixed","# size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU","# and we *don't* want to drop the remainder, otherwise we wont cover","# every sample.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","num_parallel_batches","=","num_cpu_threads",",","drop_remainder","=","True",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_pretraining.py#L324-L388"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_pretraining.py","language":"python","identifier":"_decode_record","parameters":"(record, name_to_features)","argument_list":"","return_statement":"return example","docstring":"Decodes a record to a TensorFlow example.","docstring_summary":"Decodes a record to a TensorFlow example.","docstring_tokens":["Decodes","a","record","to","a","TensorFlow","example","."],"function":"def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example","function_tokens":["def","_decode_record","(","record",",","name_to_features",")",":","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_pretraining.py#L391-L403"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"read_squad_examples","parameters":"(input_file, is_training)","argument_list":"","return_statement":"return examples","docstring":"Read a SQuAD json file into a list of SquadExample.","docstring_summary":"Read a SQuAD json file into a list of SquadExample.","docstring_tokens":["Read","a","SQuAD","json","file","into","a","list","of","SquadExample","."],"function":"def read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n 
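The `input_fn_builder` record above batches with `drop_remainder=True` because TPUs require fixed batch dimensions, while the comment notes that eval on CPU/GPU would want to keep the ragged tail so every sample is covered. A toy, framework-free sketch of what that flag changes (the `batch` helper is illustrative, not part of the original code):

```python
# Batching with and without drop_remainder, mirroring the comment above:
# training drops the ragged tail for static TPU shapes; eval would keep it.
def batch(records, batch_size, drop_remainder):
    batches = [records[i:i + batch_size]
               for i in range(0, len(records), batch_size)]
    if drop_remainder and batches and len(batches[-1]) != batch_size:
        batches.pop()
    return batches

records = list(range(10))
print(batch(records, 4, drop_remainder=True))   # [[0..3], [4..7]] - tail dropped
print(batch(records, 4, drop_remainder=False))  # keeps the 2-element tail
```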
for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples","function_tokens":["def","read_squad_examples","(","input_file",",","is_training",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","reader",":","input_data","=","json",".","load","(","reader",")","[","\"data\"","]","def","is_whitespace","(","c",")",":","if","c","==","\" \"","or","c","==","\"\\t\"","or","c","==","\"\\r\"","or","c","==","\"\\n\"","or","ord","(","c",")","==","0x202F",":","return","True","return","False","examples","=","[","]","for","entry","in","input_data",":","for","paragraph","in","entry","[","\"paragraphs\"","]",":","paragraph_text","=","paragraph","[","\"context\"","]","doc_tokens","=","[","]","char_to_word_offset","=","[","]","prev_is_whitespace","=","True","for","c","in","paragraph_text",":","if","is_whitespace","(","c",")",":","prev_is_whitespace","=","True","else",":","if","prev_is_whitespace",":","doc_tokens",".","append","(","c",")","else",":","doc_tokens","[","-","1","]","+=","c","prev_is_whitespace","=","False","char_to_word_offset",".","append","(","len","(","doc_tokens",")","-","1",")","for","qa","in","paragraph","[","\"qas\"","]",":","qas_id","=","qa","[","\"id\"","]","question_text","=","qa","[","\"question\"","]","start_position","=","None","end_position","=","None","orig_answer_text","=","None","is_impossible","=","False","if","is_training",":","if","FLAGS",".","version_2_with_negative",":","is_impossible","=","qa","[","\"is_impossible\"","]","if","(","len","(","qa","[","\"answers\"","]",")","!=","1",")","and","(","not","is_impossible",")",":","raise","ValueError","(","\"For training, each question should have exactly 1 
answer.\"",")","if","not","is_impossible",":","answer","=","qa","[","\"answers\"","]","[","0","]","orig_answer_text","=","answer","[","\"text\"","]","answer_offset","=","answer","[","\"answer_start\"","]","answer_length","=","len","(","orig_answer_text",")","start_position","=","char_to_word_offset","[","answer_offset","]","end_position","=","char_to_word_offset","[","answer_offset","+","answer_length","-","1","]","# Only add answers where the text can be exactly recovered from the","# document. If this CAN'T happen it's likely due to weird Unicode","# stuff so we will just skip the example.","#","# Note that this means for training mode, every example is NOT","# guaranteed to be preserved.","actual_text","=","\" \"",".","join","(","doc_tokens","[","start_position",":","(","end_position","+","1",")","]",")","cleaned_answer_text","=","\" \"",".","join","(","tokenization",".","whitespace_tokenize","(","orig_answer_text",")",")","if","actual_text",".","find","(","cleaned_answer_text",")","==","-","1",":","tf",".","logging",".","warning","(","\"Could not find answer: '%s' vs. '%s'\"",",","actual_text",",","cleaned_answer_text",")","continue","else",":","start_position","=","-","1","end_position","=","-","1","orig_answer_text","=","\"\"","example","=","SquadExample","(","qas_id","=","qas_id",",","question_text","=","question_text",",","doc_tokens","=","doc_tokens",",","orig_answer_text","=","orig_answer_text",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","is_impossible",")","examples",".","append","(","example",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L227-L306"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn)","argument_list":"","return_statement":"","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n 
(tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 
1","function_tokens":["def","convert_examples_to_features","(","examples",",","tokenizer",",","max_seq_length",",","doc_stride",",","max_query_length",",","is_training",",","output_fn",")",":","unique_id","=","1000000000","for","(","example_index",",","example",")","in","enumerate","(","examples",")",":","query_tokens","=","tokenizer",".","tokenize","(","example",".","question_text",")","if","len","(","query_tokens",")",">","max_query_length",":","query_tokens","=","query_tokens","[","0",":","max_query_length","]","tok_to_orig_index","=","[","]","orig_to_tok_index","=","[","]","all_doc_tokens","=","[","]","for","(","i",",","token",")","in","enumerate","(","example",".","doc_tokens",")",":","orig_to_tok_index",".","append","(","len","(","all_doc_tokens",")",")","sub_tokens","=","tokenizer",".","tokenize","(","token",")","for","sub_token","in","sub_tokens",":","tok_to_orig_index",".","append","(","i",")","all_doc_tokens",".","append","(","sub_token",")","tok_start_position","=","None","tok_end_position","=","None","if","is_training","and","example",".","is_impossible",":","tok_start_position","=","-","1","tok_end_position","=","-","1","if","is_training","and","not","example",".","is_impossible",":","tok_start_position","=","orig_to_tok_index","[","example",".","start_position","]","if","example",".","end_position","<","len","(","example",".","doc_tokens",")","-","1",":","tok_end_position","=","orig_to_tok_index","[","example",".","end_position","+","1","]","-","1","else",":","tok_end_position","=","len","(","all_doc_tokens",")","-","1","(","tok_start_position",",","tok_end_position",")","=","_improve_answer_span","(","all_doc_tokens",",","tok_start_position",",","tok_end_position",",","tokenizer",",","example",".","orig_answer_text",")","# The -3 accounts for [CLS], [SEP] and [SEP]","max_tokens_for_doc","=","max_seq_length","-","len","(","query_tokens",")","-","3","# We can have documents that are longer than the maximum sequence length.","# To deal with this we do a sliding window approach, where we take chunks","# of the up to our max length with a stride of `doc_stride`.","_DocSpan","=","collections",".","namedtuple","(","# pylint: 
disable=invalid-name","\"DocSpan\"",",","[","\"start\"",",","\"length\"","]",")","doc_spans","=","[","]","start_offset","=","0","while","start_offset","<","len","(","all_doc_tokens",")",":","length","=","len","(","all_doc_tokens",")","-","start_offset","if","length",">","max_tokens_for_doc",":","length","=","max_tokens_for_doc","doc_spans",".","append","(","_DocSpan","(","start","=","start_offset",",","length","=","length",")",")","if","start_offset","+","length","==","len","(","all_doc_tokens",")",":","break","start_offset","+=","min","(","length",",","doc_stride",")","for","(","doc_span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","tokens","=","[","]","token_to_orig_map","=","{","}","token_is_max_context","=","{","}","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","query_tokens",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","i","in","range","(","doc_span",".","length",")",":","split_token_index","=","doc_span",".","start","+","i","token_to_orig_map","[","len","(","tokens",")","]","=","tok_to_orig_index","[","split_token_index","]","is_max_context","=","_check_is_max_context","(","doc_spans",",","doc_span_index",",","split_token_index",")","token_is_max_context","[","len","(","tokens",")","]","=","is_max_context","tokens",".","append","(","all_doc_tokens","[","split_token_index","]",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","start_position","=","None","end_position","=","None","if","is_training","and","not","example",".","is_impossible",":","# For training, if our document chunk does not contain an annotation","# we throw it out, since there is nothing to predict.","doc_start","=","doc_span",".","start","doc_end","=","doc_span",".","start","+","doc_span",".","length","-","1","out_of_span","=","False","if","not","(","tok_start_position",">=","doc_start","and","tok_end_position","<=","doc_end",")",":","out_of_span","=","True","if","out_of_span",":","start_position","=","0","end_position","=","0","else",":","doc_offset","=","len","(","query_tokens",")","+","2","start_position","=","tok_start_position","-","doc_start","+","doc_offset","end_position","=","tok_end_position","-","doc_start","+","doc_offset","if","is_training","and","example",".","is_impossible",":","start_position","=","0","end_position","=","0","if","example_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","unique_id",")",")","tf",".","logging",".","info","(","\"example_index: %s\"","%","(","example_index",")",")","tf",".","logging",".","info","(","\"doc_span_index: %s\"","%","(","doc_span_index",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"token_to_orig_map: %s\"","%","\" \"",".","join","(","[","\"%d:%d\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_to_orig_map",")","]",")",")","tf",".","logging",".","info","(","\"token_is_max_context: %s\"","%","\" \"",".","join","(","[","\"%d:%s\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_is_max_context",")","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","if","is_training","and","example",".","is_impossible",":","tf",".","logging",".","info","(","\"impossible example\"",")","if","is_training","and","not","example",".","is_impossible",":","answer_text","=","\" \"",".","join","(","tokens","[","start_position",":","(","end_position","+","1",")","]",")","tf",".","logging",".","info","(","\"start_position: %d\"","%","(","start_position",")",")","tf",".","logging",".","info","(","\"end_position: %d\"","%","(","end_position",")",")","tf",".","logging",".","info","(","\"answer: 
%s\"","%","(","tokenization",".","printable_text","(","answer_text",")",")",")","feature","=","InputFeatures","(","unique_id","=","unique_id",",","example_index","=","example_index",",","doc_span_index","=","doc_span_index",",","tokens","=","tokens",",","token_to_orig_map","=","token_to_orig_map",",","token_is_max_context","=","token_is_max_context",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","example",".","is_impossible",")","# Run callback","output_fn","(","feature",")","unique_id","+=","1"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L309-L473"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"_improve_answer_span","parameters":"(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text)","argument_list":"","return_statement":"return (input_start, input_end)","docstring":"Returns tokenized answer spans that better match the annotated answer.","docstring_summary":"Returns tokenized answer spans that better match the annotated answer.","docstring_tokens":["Returns","tokenized","answer","spans","that","better","match","the","annotated","answer","."],"function":"def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)","function_tokens":["def","_improve_answer_span","(","doc_tokens",",","input_start",",","input_end",",","tokenizer",",","orig_answer_text",")",":","# The SQuAD annotations are character based. We first project them to","# whitespace-tokenized words. But then after WordPiece tokenization, we can","# often find a \"better match\". For example:","#","# Question: What year was John Smith born?","# Context: The leader was John Smith (1895-1943).","# Answer: 1895","#","# The original whitespace-tokenized answer will be \"(1895-1943).\". 
However","# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match","# the exact answer, 1895.","#","# However, this is not always possible. Consider the following:","#","# Question: What country is the top exporter of electornics?","# Context: The Japanese electronics industry is the lagest in the world.","# Answer: Japan","#","# In this case, the annotator chose \"Japan\" as a character sub-span of","# the word \"Japanese\". Since our WordPiece tokenizer does not split","# \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare","# in SQuAD, but does happen.","tok_answer_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_answer_text",")",")","for","new_start","in","range","(","input_start",",","input_end","+","1",")",":","for","new_end","in","range","(","input_end",",","new_start","-","1",",","-","1",")",":","text_span","=","\" \"",".","join","(","doc_tokens","[","new_start",":","(","new_end","+","1",")","]",")","if","text_span","==","tok_answer_text",":","return","(","new_start",",","new_end",")","return","(","input_start",",","input_end",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L476-L510"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. 
E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L513-L547"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return (start_logits, end_logits)","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls\/squad\/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls\/squad\/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, 
end_logits)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","final_hidden","=","model",".","get_sequence_output","(",")","final_hidden_shape","=","modeling",".","get_shape_list","(","final_hidden",",","expected_rank","=","3",")","batch_size","=","final_hidden_shape","[","0","]","seq_length","=","final_hidden_shape","[","1","]","hidden_size","=","final_hidden_shape","[","2","]","output_weights","=","tf",".","get_variable","(","\"cls\/squad\/output_weights\"",",","[","2",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"cls\/squad\/output_bias\"",",","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","final_hidden_matrix","=","tf",".","reshape","(","final_hidden",",","[","batch_size","*","seq_length",",","hidden_size","]",")","logits","=","tf",".","matmul","(","final_hidden_matrix",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","logits","=","tf",".","reshape","(","logits",",","[","batch_size",",","seq_length",",","2","]",")","logits","=","tf",".","transpose","(","logits",",","[","2",",","0",",","1","]",")","unstacked_logits","=","tf",".","unstack","(","logits",",","axis","=","0",")","(","start_logits",",","end_logits",")","=","(","unstacked_logits","[","0","]",",","unstacked_logits","[","1","]",")","return","(","start_logits",",","end_logits",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L550-L587"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n 
use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) \/ 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","start_logits",",","end_logits",")","=","create_model","(","bert_config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","seq_length","=","modeling",".","get_shape_list","(","input_ids",")","[","1","]","def","compute_loss","(","logits",",","positions",")",":","one_hot_positions","=","tf",".","one_hot","(","positions",",","depth","=","seq_length",",","dtype","=","tf",".","float32",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","loss","=","-","tf",".","reduce_mean","(","tf",".","reduce_sum","(","one_hot_positions","*","log_probs",",","axis","=","-","1",")",")","return","loss","start_positions","=","features","[","\"start_positions\"","]","end_positions","=","features","[","\"end_positions\"","]","start_loss","=","compute_loss","(","start_logits",",","start_positions",")","end_loss","=","compute_loss","(","end_logits",",","end_positions",")","total_loss","=","(","start_loss","+","end_loss",")","\/","2.0","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","predictions","=","{","\"unique_ids\"",":","unique_ids",",","\"start_logits\"",":","start_logits",",","\"end_logits\"",":","end_logits",",","}","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and PREDICT modes are supported: 
%s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L590-L684"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"input_fn_builder","parameters":"(input_file, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"unique_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","}","if","is_training",":","name_to_features","[","\"start_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","name_to_features","[","\"end_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to 
int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L687-L734"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"write_predictions","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = 
_get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")","function_tokens":["def","write_predictions","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",",","output_null_log_odds_file",")",":","tf",".","logging",".","info","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","tf",".","logging",".","info","(","\"Writing nbest to: %s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature",".","example_index","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","all_examples",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","score_null","=","1000000","# large and positive","min_null_feature_index","=","0","# the paragraph slice with min mull score","null_start_logit","=","0","# the start logit at the slice with min null score","null_end_logit","=","0","# the end logit at the slice with min null 
score","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature",".","unique_id","]","start_indexes","=","_get_best_indexes","(","result",".","start_logits",",","n_best_size",")","end_indexes","=","_get_best_indexes","(","result",".","end_logits",",","n_best_size",")","# if we could have irrelevant answers, get the min score of irrelevant","if","FLAGS",".","version_2_with_negative",":","feature_null_score","=","result",".","start_logits","[","0","]","+","result",".","end_logits","[","0","]","if","feature_null_score","<","score_null",":","score_null","=","feature_null_score","min_null_feature_index","=","feature_index","null_start_logit","=","result",".","start_logits","[","0","]","null_end_logit","=","result",".","end_logits","[","0","]","for","start_index","in","start_indexes",":","for","end_index","in","end_indexes",":","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature",".","tokens",")",":","continue","if","end_index",">=","len","(","feature",".","tokens",")",":","continue","if","start_index","not","in","feature",".","token_to_orig_map",":","continue","if","end_index","not","in","feature",".","token_to_orig_map",":","continue","if","not","feature",".","token_is_max_context",".","get","(","start_index",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","result",".","start_logits","[","start_index","]",",","end_logit","=","result",".","end_logits","[","end_index","]",")",")","if","FLAGS",".","version_2_with_negative",":","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","min_null_feature_index",",","start_index","=","0",",","end_index","=","0",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"","]",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature",".","tokens","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature",".","token_to_orig_map","[","pred",".","start_index","]","orig_doc_end","=","feature",".","token_to_orig_map","[","pred",".","end_index","]","orig_tokens","=","example",".","doc_tokens","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\" \"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean 
whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\" \"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",")",")","# if we didn't inlude the empty option in the n-best, inlcude it","if","FLAGS",".","version_2_with_negative",":","if","\"\"","not","in","seen_predictions",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","# In very rare edge cases we could have no valid predictions. So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","assert","len","(","nbest",")",">=","1","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","if","not","best_non_null_entry",":","if","entry",".","text",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","probs","[","i","]","output","[","\"start_logit\"","]","=","entry",".","start_logit","output","[","\"end_logit\"","]","=","entry",".","end_logit","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","if","not","FLAGS",".","version_2_with_negative",":","all_predictions","[","example",".","qas_id","]","=","nbest_json","[","0","]","[","\"text\"","]","else",":","# predict \"\" iff the null score - the score of best non-null > 
threshold","score_diff","=","score_null","-","best_non_null_entry",".","start_logit","-","(","best_non_null_entry",".","end_logit",")","scores_diff_json","[","example",".","qas_id","]","=","score_diff","if","score_diff",">","FLAGS",".","null_score_diff_threshold",":","all_predictions","[","example",".","qas_id","]","=","\"\"","else",":","all_predictions","[","example",".","qas_id","]","=","best_non_null_entry",".","text","all_nbest_json","[","example",".","qas_id","]","=","nbest_json","with","tf",".","gfile",".","GFile","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",")","+","\"\\n\"",")","with","tf",".","gfile",".","GFile","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",")","+","\"\\n\"",")","if","FLAGS",".","version_2_with_negative",":","with","tf",".","gfile",".","GFile","(","output_null_log_odds_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","scores_diff_json",",","indent","=","4",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L741-L924"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"get_final_text","parameters":"(pred_text, orig_text, do_lower_case)","argument_list":"","return_statement":"return output_text","docstring":"Project the tokenized prediction back to the original text.","docstring_summary":"Project the tokenized prediction back to the original text.","docstring_tokens":["Project","the","tokenized","prediction","back","to","the","original","text","."],"function":"def get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping\/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text","function_tokens":["def","get_final_text","(","pred_text",",","orig_text",",","do_lower_case",")",":","# When we created the data, we kept track of the alignment between original","# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So","# now `orig_text` contains the span of our original text corresponding to the","# span that we predicted.","#","# However, `orig_text` may contain extra characters that we don't want in","# our prediction.","#","# For example, let's say:","# pred_text = steve smith","# orig_text = Steve Smith's","#","# We don't want to return `orig_text` because it contains the extra \"'s\".","#","# We don't want to return `pred_text` because it's already been normalized","# (the SQuAD eval script also does punctuation stripping\/lower casing but","# our tokenizer does additional normalization like stripping accent","# characters).","#","# What we really want to return is \"Steve Smith\".","#","# Therefore, we have to apply a semi-complicated alignment heruistic between","# `pred_text` and `orig_text` to get a character-to-charcter alignment. This","# can fail in certain cases in which case we just return `orig_text`.","def","_strip_spaces","(","text",")",":","ns_chars","=","[","]","ns_to_s_map","=","collections",".","OrderedDict","(",")","for","(","i",",","c",")","in","enumerate","(","text",")",":","if","c","==","\" \"",":","continue","ns_to_s_map","[","len","(","ns_chars",")","]","=","i","ns_chars",".","append","(","c",")","ns_text","=","\"\"",".","join","(","ns_chars",")","return","(","ns_text",",","ns_to_s_map",")","# We first tokenize `orig_text`, strip whitespace from the result","# and `pred_text`, and check if they are the same length. 
If they are","# NOT the same length, the heuristic has failed. If they are the same","# length, we assume the characters are one-to-one aligned.","tokenizer","=","tokenization",".","BasicTokenizer","(","do_lower_case","=","do_lower_case",")","tok_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_text",")",")","start_position","=","tok_text",".","find","(","pred_text",")","if","start_position","==","-","1",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Unable to find text: '%s' in '%s'\"","%","(","pred_text",",","orig_text",")",")","return","orig_text","end_position","=","start_position","+","len","(","pred_text",")","-","1","(","orig_ns_text",",","orig_ns_to_s_map",")","=","_strip_spaces","(","orig_text",")","(","tok_ns_text",",","tok_ns_to_s_map",")","=","_strip_spaces","(","tok_text",")","if","len","(","orig_ns_text",")","!=","len","(","tok_ns_text",")",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Length not equal after stripping spaces: '%s' vs '%s'\"",",","orig_ns_text",",","tok_ns_text",")","return","orig_text","# We then project the characters in `pred_text` back to `orig_text` using","# the character-to-character alignment.","tok_s_to_ns_map","=","{","}","for","(","i",",","tok_index",")","in","six",".","iteritems","(","tok_ns_to_s_map",")",":","tok_s_to_ns_map","[","tok_index","]","=","i","orig_start_position","=","None","if","start_position","in","tok_s_to_ns_map",":","ns_start_position","=","tok_s_to_ns_map","[","start_position","]","if","ns_start_position","in","orig_ns_to_s_map",":","orig_start_position","=","orig_ns_to_s_map","[","ns_start_position","]","if","orig_start_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map start position\"",")","return","orig_text","orig_end_position","=","None","if","end_position","in","tok_s_to_ns_map",":","ns_end_position","=","tok_s_to_ns_map","[","end_position","]","if","ns_end_position","in","orig_ns_to_s_map",":","orig_end_position","=","orig_ns_to_s_map","[","ns_end_position","]","if","orig_end_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map end position\"",")","return","orig_text","output_text","=","orig_text","[","orig_start_position",":","(","orig_end_position","+","1",")","]","return","output_text"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L927-L1020"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return 
best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L1023-L1032"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L1035-L1055"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"validate_flags_or_throw","parameters":"(bert_config)","argument_list":"","return_statement":"","docstring":"Validate the input FLAGS or throw an exception.","docstring_summary":"Validate the input FLAGS or throw an exception.","docstring_tokens":["Validate","the","input","FLAGS","or","throw","an","exception","."],"function":"def validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n 
\"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))","function_tokens":["def","validate_flags_or_throw","(","bert_config",")",":","tokenization",".","validate_case_matches_checkpoint","(","FLAGS",".","do_lower_case",",","FLAGS",".","init_checkpoint",")","if","not","FLAGS",".","do_train","and","not","FLAGS",".","do_predict",":","raise","ValueError","(","\"At least one of `do_train` or `do_predict` must be True.\"",")","if","FLAGS",".","do_train",":","if","not","FLAGS",".","train_file",":","raise","ValueError","(","\"If `do_train` is True, then `train_file` must be specified.\"",")","if","FLAGS",".","do_predict",":","if","not","FLAGS",".","predict_file",":","raise","ValueError","(","\"If `do_predict` is True, then `predict_file` must be specified.\"",")","if","FLAGS",".","max_seq_length",">","bert_config",".","max_position_embeddings",":","raise","ValueError","(","\"Cannot use sequence length %d because the BERT model \"","\"was only trained up to sequence length %d\"","%","(","FLAGS",".","max_seq_length",",","bert_config",".","max_position_embeddings",")",")","if","FLAGS",".","max_seq_length","<=","FLAGS",".","max_query_length","+","3",":","raise","ValueError","(","\"The max_seq_length (%d) must be greater than max_query_length \"","\"(%d) + 3\"","%","(","FLAGS",".","max_seq_length",",","FLAGS",".","max_query_length",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L1097-L1123"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_squad.py","language":"python","identifier":"FeatureWriter.process_feature","parameters":"(self, feature)","argument_list":"","return_statement":"","docstring":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_summary":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_tokens":["Write","a","InputFeature","to","the","TFRecordWriter","as","a","tf",".","train",".","Example","."],"function":"def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n 
self._writer.write(tf_example.SerializeToString())","function_tokens":["def","process_feature","(","self",",","feature",")",":","self",".","num_features","+=","1","def","create_int_feature","(","values",")",":","feature","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","feature","features","=","collections",".","OrderedDict","(",")","features","[","\"unique_ids\"","]","=","create_int_feature","(","[","feature",".","unique_id","]",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","if","self",".","is_training",":","features","[","\"start_positions\"","]","=","create_int_feature","(","[","feature",".","start_position","]",")","features","[","\"end_positions\"","]","=","create_int_feature","(","[","feature",".","end_position","]",")","impossible","=","0","if","feature",".","is_impossible",":","impossible","=","1","features","[","\"is_impossible\"","]","=","create_int_feature","(","[","impossible","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","self",".","_writer",".","write","(","tf_example",".","SerializeToString","(",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_squad.py#L1067-L1091"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"validate_case_matches_checkpoint","parameters":"(do_lower_case, init_checkpoint)","argument_list":"","return_statement":"","docstring":"Checks whether the casing config is consistent with the checkpoint name.","docstring_summary":"Checks whether the casing config is consistent with the checkpoint name.","docstring_tokens":["Checks","whether","the","casing","config","is","consistent","with","the","checkpoint","name","."],"function":"def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. 
The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-trained. If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))","function_tokens":["def","validate_case_matches_checkpoint","(","do_lower_case",",","init_checkpoint",")",":","# The casing has to be passed in by the user and there is no explicit check","# as to whether it matches the checkpoint. The casing information probably","# should have been stored in the bert_config.json file, but it's not, so","# we have to heuristically detect it to validate.","if","not","init_checkpoint",":","return","m","=","re",".","match","(","\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\"",",","init_checkpoint",")","if","m","is","None",":","return","model_name","=","m",".","group","(","1",")","lower_models","=","[","\"uncased_L-24_H-1024_A-16\"",",","\"uncased_L-12_H-768_A-12\"",",","\"multilingual_L-12_H-768_A-12\"",",","\"chinese_L-12_H-768_A-12\"","]","cased_models","=","[","\"cased_L-12_H-768_A-12\"",",","\"cased_L-24_H-1024_A-16\"",",","\"multi_cased_L-12_H-768_A-12\"","]","is_bad_config","=","False","if","model_name","in","lower_models","and","not","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"False\"","case_name","=","\"lowercased\"","opposite_flag","=","\"True\"","if","model_name","in","cased_models","and","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"True\"","case_name","=","\"cased\"","opposite_flag","=","\"False\"","if","is_bad_config",":","raise","ValueError","(","\"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"","\"However, `%s` seems to be a %s model, so you \"","\"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"","\"how the model was pre-trained. 
If this error is wrong, please \"","\"just comment out this check.\"","%","(","actual_flag",",","init_checkpoint",",","model_name",",","case_name",",","opposite_flag",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L28-L75"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L78-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return 
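Annotation: validate_case_matches_checkpoint (the record above) infers the expected casing from the checkpoint's directory name and raises ValueError when the do_lower_case flag contradicts it. A small sketch of the heuristic, using a hypothetical checkpoint path:

import re

init_checkpoint = "models/uncased_L-12_H-768_A-12/bert_model.ckpt"  # hypothetical
m = re.match(r"^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
model_name = m.group(1) if m else None  # -> "uncased_L-12_H-768_A-12"
# A name in the "uncased"/"multilingual"/"chinese" list requires do_lower_case=True,
# one in the "cased"/"multi_cased" list requires do_lower_case=False; any other
# name (or no match) is left unchecked.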
text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L98-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","index","=","0","with","tf",".","gfile",".","GFile","(","vocab_file",",","\"r\"",")","as","reader",":","while","True",":","token","=","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","token",":","break","token","=","token",".","strip","(",")","vocab","[","token","]","=","index","index","+=","1","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L121-L133"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"convert_by_vocab","parameters":"(vocab, items)","argument_list":"","return_statement":"return output","docstring":"Converts a sequence of [tokens|ids] using the vocab.","docstring_summary":"Converts a sequence of [tokens|ids] using the vocab.","docstring_tokens":["Converts","a","sequence","of","[","tokens|ids","]","using","the","vocab","."],"function":"def convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n output.append(vocab[item])\n return 
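Annotation: on Python 3 both convert_to_unicode and printable_text reduce to "pass str through, decode bytes as UTF-8 with errors ignored"; they differ only on Python 2, where printable_text encodes unicode back to a UTF-8 byte string for printing. A quick Python 3 sketch of the behavior:

convert_to_unicode("already unicode")       # returned unchanged
convert_to_unicode("中文".encode("utf-8"))   # bytes -> "中文"
convert_to_unicode(b"\xff\xfe")             # undecodable bytes are dropped -> ""
convert_to_unicode(3.14)                    # raises ValueError: unsupported type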
output","function_tokens":["def","convert_by_vocab","(","vocab",",","items",")",":","output","=","[","]","for","item","in","items",":","output",".","append","(","vocab","[","item","]",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L136-L141"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","piece","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L152-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically contorl characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L362-L371"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # 
These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat","in","(","\"Cc\"",",","\"Cf\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L374-L383"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L386-L399"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = 
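Annotation: the three character predicates above combine a few hard-coded ASCII cases with Unicode general categories: Zs for spaces, Cc/Cf for controls, P* plus the non-alphanumeric ASCII ranges for punctuation (so "^", "$", "`" count as punctuation even though Unicode does not class them that way). A compact stdlib-only restatement:

import unicodedata

def is_whitespace(ch):
    # \t, \n, \r are control characters but treated as whitespace here.
    return ch in " \t\n\r" or unicodedata.category(ch) == "Zs"

def is_control(ch):
    return ch not in "\t\n\r" and unicodedata.category(ch) in ("Cc", "Cf")

def is_punctuation(ch):
    cp = ord(ch)
    if 33 <= cp <= 47 or 58 <= cp <= 64 or 91 <= cp <= 96 or 123 <= cp <= 126:
        return True  # every non-letter/number ASCII char counts as punctuation
    return unicodedata.category(ch).startswith("P")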
do_lower_case","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",")",":","self",".","do_lower_case","=","do_lower_case"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L188-L194"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text.","docstring_summary":"Tokenizes a piece of text.","docstring_tokens":["Tokenizes","a","piece","of","text","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. 
This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L196-L218"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L220-L229"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text)","argument_list":"","return_statement":"return [\"\".join(x) for x in output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in 
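Annotation: BasicTokenizer.tokenize is a pipeline — clean invalid characters, pad CJK characters with spaces, whitespace-split, then per token optionally lowercase, strip accents, and split on punctuation. The two per-token helpers are easy to see in isolation; a sketch assuming the is_punctuation predicate from the sketch above:

import unicodedata

def strip_accents(text):
    # NFD separates base characters from combining marks (category Mn), which are dropped.
    return "".join(c for c in unicodedata.normalize("NFD", text)
                   if unicodedata.category(c) != "Mn")

def split_on_punc(text):
    out, start_new = [], True
    for ch in text:
        if is_punctuation(ch):
            out.append(ch)          # punctuation is always its own token
            start_new = True
        else:
            if start_new:
                out.append("")
            out[-1] += ch
            start_new = False
    return out

split_on_punc(strip_accents("héllo,world"))  # -> ["hello", ",", "world"]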
output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",")",":","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L231-L249"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L251-L262"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like the all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L264-L284"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L286-L297"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tokenization.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return 
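Annotation: _is_chinese_char tests a codepoint against the CJK Unified Ideographs blocks and their extensions, and _tokenize_chinese_chars pads each such character with spaces so the later whitespace split yields one token per CJK character. A condensed sketch of both:

_CJK_RANGES = [
    (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF), (0x2A700, 0x2B73F),
    (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF), (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
]

def tokenize_chinese_chars(text):
    def is_cjk(cp):
        return any(lo <= cp <= hi for lo, hi in _CJK_RANGES)
    return "".join(" %s " % ch if is_cjk(ord(ch)) else ch for ch in text)

tokenize_chinese_chars("BERT模型")  # -> "BERT 模  型 ", later whitespace-split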
output_tokens","docstring":"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tokenization.py#L308-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tf_metrics.py","language":"python","identifier":"precision","parameters":"(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro')","argument_list":"","return_statement":"return (pr, op)","docstring":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 
or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","precision","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def precision(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro'):\n \"\"\"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, 
optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n pr, _, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n op, _, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (pr, op)","function_tokens":["def","precision","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","pr",",","_",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","op",",","_",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","pr",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tf_metrics.py#L15-L50"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tf_metrics.py","language":"python","identifier":"recall","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro')","argument_list":"","return_statement":"return (re, op)","docstring":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
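Annotation: precision, recall, and fbeta in tf_metrics.py all follow the same shape — accumulate a streaming confusion matrix, then read the score off it under one of three averaging modes. The difference between 'micro' and 'macro' is easiest to see offline; a NumPy illustration on a toy matrix (not the TF streaming op itself):

import numpy as np

cm = np.array([[50, 10],   # rows = gold class, columns = predicted class
               [ 5,  2]])
# micro: pool true positives over all positive classes, then take one ratio
micro_p = np.trace(cm) / cm.sum()            # equals accuracy when every class is positive
# macro: one precision per class, then an unweighted mean (ignores class imbalance)
per_class_p = np.diag(cm) / cm.sum(axis=0)
macro_p = per_class_p.mean()
# 'weighted' would average per_class_p weighted by the gold counts cm.sum(axis=1)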
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","recall","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def recall(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro'):\n \"\"\"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, re, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n _, op, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (re, op)","function_tokens":["def","recall","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","re",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","_",",","op",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","re",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tf_metrics.py#L53-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tf_metrics.py","language":"python","identifier":"fbeta","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1)","argument_list":"","return_statement":"return (fbeta, op)","docstring":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","fbeta","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1):\n \"\"\"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, _, fbeta = metrics_from_confusion_matrix(\n cm, pos_indices, average=average, beta=beta)\n _, _, op = metrics_from_confusion_matrix(\n op, pos_indices, average=average, beta=beta)\n return (fbeta, op)","function_tokens":["def","fbeta","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",",","beta","=","1",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","_",",","fbeta","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",",","beta","=","beta",")","_",",","_",",","op","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",",","beta","=","beta",")","return","(","fbeta",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tf_metrics.py#L97-L134"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tf_metrics.py","language":"python","identifier":"safe_div","parameters":"(numerator, denominator)","argument_list":"","return_statement":"return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","docstring":"Safe division, return 0 if denominator is 0","docstring_summary":"Safe division, return 0 if denominator is 0","docstring_tokens":["Safe","division","return","0","if","denominator","is","0"],"function":"def safe_div(numerator, denominator):\n \"\"\"Safe division, return 0 if denominator is 0\"\"\"\n numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)\n zeros = tf.zeros_like(numerator, dtype=numerator.dtype)\n denominator_is_zero = tf.equal(denominator, zeros)\n return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","function_tokens":["def","safe_div","(","numerator",",","denominator",")",":","numerator",",","denominator","=","tf",".","to_float","(","numerator",")",",","tf",".","to_float","(","denominator",")","zeros","=","tf",".","zeros_like","(","numerator",",","dtype","=","numerator",".","dtype",")","denominator_is_zero","=","tf",".","equal","(","denominator",",","zeros",")","return","tf",".","where","(","denominator_is_zero",",","zeros",",","numerator","\/","denominator",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tf_metrics.py#L137-L142"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tf_metrics.py","language":"python","identifier":"pr_re_fbeta","parameters":"(cm, pos_indices, beta=1)","argument_list":"","return_statement":"return pr, re, fbeta","docstring":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_summary":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_tokens":["Uses","a","confusion","matrix","to","compute","precision","recall","and","fbeta"],"function":"def pr_re_fbeta(cm, pos_indices, beta=1):\n \"\"\"Uses 
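Annotation: safe_div guards every ratio in these metrics against empty classes — where the denominator is 0 it returns 0 instead of Inf/NaN. The same idea in NumPy:

import numpy as np

def safe_div(num, den):
    num, den = np.asarray(num, dtype=float), np.asarray(den, dtype=float)
    with np.errstate(divide="ignore", invalid="ignore"):
        return np.where(den == 0.0, 0.0, num / den)

safe_div(3, 0)  # -> 0.0 rather than inf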
a confusion matrix to compute precision, recall and fbeta\"\"\"\n num_classes = cm.shape[0]\n neg_indices = [i for i in range(num_classes) if i not in pos_indices]\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, neg_indices] = 0\n diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask))\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[:, neg_indices] = 0\n tot_pred = tf.reduce_sum(cm * cm_mask)\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, :] = 0\n tot_gold = tf.reduce_sum(cm * cm_mask)\n\n pr = safe_div(diag_sum, tot_pred)\n re = safe_div(diag_sum, tot_gold)\n fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re)\n\n return pr, re, fbeta","function_tokens":["def","pr_re_fbeta","(","cm",",","pos_indices",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","neg_indices","=","[","i","for","i","in","range","(","num_classes",")","if","i","not","in","pos_indices","]","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",","neg_indices","]","=","0","diag_sum","=","tf",".","reduce_sum","(","tf",".","diag_part","(","cm","*","cm_mask",")",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[",":",",","neg_indices","]","=","0","tot_pred","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",",":","]","=","0","tot_gold","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","pr","=","safe_div","(","diag_sum",",","tot_pred",")","re","=","safe_div","(","diag_sum",",","tot_gold",")","fbeta","=","safe_div","(","(","1.","+","beta","**","2",")","*","pr","*","re",",","beta","**","2","*","pr","+","re",")","return","pr",",","re",",","fbeta"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tf_metrics.py#L145-L165"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/tf_metrics.py","language":"python","identifier":"metrics_from_confusion_matrix","parameters":"(cm, pos_indices=None, average='micro',\n beta=1)","argument_list":"","return_statement":"","docstring":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'","docstring_summary":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 
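Annotation: pr_re_fbeta reads all three scores off the confusion matrix with masks — true positives are the diagonal entries of the positive classes, total predictions the positive columns, total gold the positive rows. A NumPy transcription of the same formulas (eager arrays instead of graph ops):

import numpy as np

def pr_re_fbeta_np(cm, pos_indices, beta=1.0):
    cm = np.asarray(cm, dtype=float)
    tp = cm[pos_indices, pos_indices].sum()   # diagonal cells of positive classes
    tot_pred = cm[:, pos_indices].sum()       # everything predicted as positive
    tot_gold = cm[pos_indices, :].sum()       # everything that is gold positive
    pr = tp / tot_pred if tot_pred else 0.0
    re = tp / tot_gold if tot_gold else 0.0
    denom = beta ** 2 * pr + re
    fb = (1.0 + beta ** 2) * pr * re / denom if denom else 0.0
    return pr, re, fb

pr_re_fbeta_np([[50, 10], [5, 2]], pos_indices=[1])  # precision/recall/F1 of class 1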
'weighted'","docstring_tokens":["Precision","Recall","and","F1","from","the","confusion","matrix","Parameters","----------","cm",":","tf",".","Tensor","of","type","tf",".","int32","of","shape","(","num_classes","num_classes",")","The","streaming","confusion","matrix",".","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","average",":","str","optional","micro","macro","or","weighted"],"function":"def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro',\n beta=1):\n \"\"\"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'\n \"\"\"\n num_classes = cm.shape[0]\n if pos_indices is None:\n pos_indices = [i for i in range(num_classes)]\n\n if average == 'micro':\n return pr_re_fbeta(cm, pos_indices, beta)\n elif average in {'macro', 'weighted'}:\n precisions, recalls, fbetas, n_golds = [], [], [], []\n for idx in pos_indices:\n pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)\n precisions.append(pr)\n recalls.append(re)\n fbetas.append(fbeta)\n cm_mask = np.zeros([num_classes, num_classes])\n cm_mask[idx, :] = 1\n n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))\n\n if average == 'macro':\n pr = tf.reduce_mean(precisions)\n re = tf.reduce_mean(recalls)\n fbeta = tf.reduce_mean(fbetas)\n return pr, re, fbeta\n if average == 'weighted':\n n_gold = tf.reduce_sum(n_golds)\n pr_sum = sum(p * n for p, n in zip(precisions, n_golds))\n pr = safe_div(pr_sum, n_gold)\n re_sum = sum(r * n for r, n in zip(recalls, n_golds))\n re = safe_div(re_sum, n_gold)\n fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))\n fbeta = safe_div(fbeta_sum, n_gold)\n return pr, re, fbeta\n\n else:\n raise 
NotImplementedError()","function_tokens":["def","metrics_from_confusion_matrix","(","cm",",","pos_indices","=","None",",","average","=","'micro'",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","if","pos_indices","is","None",":","pos_indices","=","[","i","for","i","in","range","(","num_classes",")","]","if","average","==","'micro'",":","return","pr_re_fbeta","(","cm",",","pos_indices",",","beta",")","elif","average","in","{","'macro'",",","'weighted'","}",":","precisions",",","recalls",",","fbetas",",","n_golds","=","[","]",",","[","]",",","[","]",",","[","]","for","idx","in","pos_indices",":","pr",",","re",",","fbeta","=","pr_re_fbeta","(","cm",",","[","idx","]",",","beta",")","precisions",".","append","(","pr",")","recalls",".","append","(","re",")","fbetas",".","append","(","fbeta",")","cm_mask","=","np",".","zeros","(","[","num_classes",",","num_classes","]",")","cm_mask","[","idx",",",":","]","=","1","n_golds",".","append","(","tf",".","to_float","(","tf",".","reduce_sum","(","cm","*","cm_mask",")",")",")","if","average","==","'macro'",":","pr","=","tf",".","reduce_mean","(","precisions",")","re","=","tf",".","reduce_mean","(","recalls",")","fbeta","=","tf",".","reduce_mean","(","fbetas",")","return","pr",",","re",",","fbeta","if","average","==","'weighted'",":","n_gold","=","tf",".","reduce_sum","(","n_golds",")","pr_sum","=","sum","(","p","*","n","for","p",",","n","in","zip","(","precisions",",","n_golds",")",")","pr","=","safe_div","(","pr_sum",",","n_gold",")","re_sum","=","sum","(","r","*","n","for","r",",","n","in","zip","(","recalls",",","n_golds",")",")","re","=","safe_div","(","re_sum",",","n_gold",")","fbeta_sum","=","sum","(","f","*","n","for","f",",","n","in","zip","(","fbetas",",","n_golds",")",")","fbeta","=","safe_div","(","fbeta_sum",",","n_gold",")","return","pr",",","re",",","fbeta","else",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/tf_metrics.py#L168-L215"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * cdf","docstring":"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.","docstring_summary":"Gaussian Error Linear Unit.","docstring_tokens":["Gaussian","Error","Linear","Unit","."],"function":"def gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 \/ np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","tf",".","tanh","(","(","np",".","sqrt","(","2","\/","np",".","pi",")","*","(","x","+","0.044715","*","tf",".","pow","(","x",",","3",")",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L264-L277"} 
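Annotation: the gelu record above implements the tanh approximation 0.5·x·(1 + tanh(√(2/π)·(x + 0.044715·x³))) rather than the exact x·Φ(x) form; the two agree to about three decimals over typical activations. A scalar sketch of both (the exact form is from the GELU paper, not this file):

import math

def gelu_tanh(x):
    # tanh approximation, as in the record above
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))

def gelu_exact(x):
    # exact definition: x times the standard normal CDF
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

gelu_tanh(1.0), gelu_exact(1.0)  # -> roughly (0.8412, 0.8413)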
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"get_activation","parameters":"(activation_string)","argument_list":"","return_statement":"","docstring":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.","docstring_summary":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.","docstring_tokens":["Maps","a","string","to","a","Python","function","e",".","g",".","relu","=",">","tf",".","nn",".","relu","."],"function":"def get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)","function_tokens":["def","get_activation","(","activation_string",")",":","# We assume that anything that\"s not a string is already an activation","# function, so we just return it.","if","not","isinstance","(","activation_string",",","six",".","string_types",")",":","return","activation_string","if","not","activation_string",":","return","None","act","=","activation_string",".","lower","(",")","if","act","==","\"linear\"",":","return","None","elif","act","==","\"relu\"",":","return","tf",".","nn",".","relu","elif","act","==","\"gelu\"",":","return","gelu","elif","act","==","\"tanh\"",":","return","tf",".","tanh","else",":","raise","ValueError","(","\"Unsupported activation: %s\"","%","act",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L280-L314"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"get_assignment_map_from_checkpoint","parameters":"(tvars, init_checkpoint)","argument_list":"","return_statement":"return (assignment_map, initialized_variable_names)","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def get_assignment_map_from_checkpoint(tvars, 
init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)","function_tokens":["def","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","assignment_map","=","{","}","initialized_variable_names","=","{","}","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","if","name","not","in","name_to_variable",":","continue","assignment_map","[","name","]","=","name","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","return","(","assignment_map",",","initialized_variable_names",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L317-L341"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"dropout","parameters":"(input_tensor, dropout_prob)","argument_list":"","return_statement":"return output","docstring":"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.","docstring_summary":"Perform dropout.","docstring_tokens":["Perform","dropout","."],"function":"def dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\n return output","function_tokens":["def","dropout","(","input_tensor",",","dropout_prob",")",":","if","dropout_prob","is","None","or","dropout_prob","==","0.0",":","return","input_tensor","output","=","tf",".","nn",".","dropout","(","input_tensor",",","1.0","-","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L344-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"layer_norm","parameters":"(input_tensor, name=None)","argument_list":"","return_statement":"return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","docstring":"Run layer normalization on the last dimension of the tensor.","docstring_summary":"Run layer normalization on the last dimension of the tensor.","docstring_tokens":["Run","layer","normalization","on","the","last","dimension","of","the","tensor","."],"function":"def layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","function_tokens":["def","layer_norm","(","input_tensor",",","name","=","None",")",":","return","tf",".","contrib",".","layers",".","layer_norm","(","inputs","=","input_tensor",",","begin_norm_axis","=","-","1",",","begin_params_axis","=","-","1",",","scope","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L362-L365"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"layer_norm_and_dropout","parameters":"(input_tensor, dropout_prob, name=None)","argument_list":"","return_statement":"return output_tensor","docstring":"Runs layer normalization followed by dropout.","docstring_summary":"Runs layer normalization followed by dropout.","docstring_tokens":["Runs","layer","normalization","followed","by","dropout","."],"function":"def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor","function_tokens":["def","layer_norm_and_dropout","(","input_tensor",",","dropout_prob",",","name","=","None",")",":","output_tensor","=","layer_norm","(","input_tensor",",","name",")","output_tensor","=","dropout","(","output_tensor",",","dropout_prob",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L368-L372"} 
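The three records above (`dropout`, `layer_norm`, `layer_norm_and_dropout`) compose into the standard normalize-then-drop step used throughout the model. A minimal NumPy sketch of that composition, assuming TF 1.x dropout semantics (`tf.nn.dropout` there takes a *keep* probability, hence the `1.0 - dropout_prob` in the record) and omitting the learned scale/shift that `tf.contrib.layers.layer_norm` would own; the `*_np` names are hypothetical:

```python
import numpy as np

def layer_norm_np(x, eps=1e-12):
    """Normalize over the last dimension (begin_norm_axis=-1), without the
    learned gain/bias parameters the TF layer would add."""
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return (x - mean) / np.sqrt(var + eps)

def dropout_np(x, dropout_prob, rng=None):
    """Inverted dropout; dropout_prob is the probability of *dropping* a
    value, matching the docstring above (NOT a keep probability)."""
    if dropout_prob is None or dropout_prob == 0.0:
        return x
    rng = rng or np.random.default_rng(0)
    keep_prob = 1.0 - dropout_prob
    mask = rng.random(x.shape) < keep_prob
    return np.where(mask, x / keep_prob, 0.0)

def layer_norm_and_dropout_np(x, dropout_prob):
    """Layer normalization followed by dropout, mirroring the record above."""
    return dropout_np(layer_norm_np(x), dropout_prob)

x = np.random.default_rng(1).normal(size=(2, 4, 8))
out = layer_norm_and_dropout_np(x, dropout_prob=0.1)
print(out.shape)  # (2, 4, 8): same shape as the input, as documented
```

At inference time this would be called with `dropout_prob=0.0`, so the early return in the `dropout` record passes the normalized tensor through unchanged.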
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"create_initializer","parameters":"(initializer_range=0.02)","argument_list":"","return_statement":"return tf.truncated_normal_initializer(stddev=initializer_range)","docstring":"Creates a `truncated_normal_initializer` with the given range.","docstring_summary":"Creates a `truncated_normal_initializer` with the given range.","docstring_tokens":["Creates","a","truncated_normal_initializer","with","the","given","range","."],"function":"def create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)","function_tokens":["def","create_initializer","(","initializer_range","=","0.02",")",":","return","tf",".","truncated_normal_initializer","(","stddev","=","initializer_range",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L375-L377"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"embedding_lookup","parameters":"(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False)","argument_list":"","return_statement":"return (output, embedding_table)","docstring":"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].","docstring_summary":"Looks up words embeddings for id tensor.","docstring_tokens":["Looks","up","words","embeddings","for","id","tensor","."],"function":"def embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. 
If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.get_variable(\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n output = tf.matmul(one_hot_input_ids, embedding_table)\n else:\n output = tf.gather(embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * embedding_size])\n return (output, embedding_table)","function_tokens":["def","embedding_lookup","(","input_ids",",","vocab_size",",","embedding_size","=","128",",","initializer_range","=","0.02",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","False",")",":","# This function assumes that the input is of shape [batch_size, seq_length,","# num_inputs].","#","# If the input is a 2D tensor of shape [batch_size, seq_length], we","# reshape to [batch_size, seq_length, 1].","if","input_ids",".","shape",".","ndims","==","2",":","input_ids","=","tf",".","expand_dims","(","input_ids",",","axis","=","[","-","1","]",")","embedding_table","=","tf",".","get_variable","(","name","=","word_embedding_name",",","shape","=","[","vocab_size",",","embedding_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","flat_input_ids","=","tf",".","reshape","(","input_ids",",","[","-","1","]",")","if","use_one_hot_embeddings",":","one_hot_input_ids","=","tf",".","one_hot","(","flat_input_ids",",","depth","=","vocab_size",")","output","=","tf",".","matmul","(","one_hot_input_ids",",","embedding_table",")","else",":","output","=","tf",".","gather","(","embedding_table",",","flat_input_ids",")","input_shape","=","get_shape_list","(","input_ids",")","output","=","tf",".","reshape","(","output",",","input_shape","[","0",":","-","1","]","+","[","input_shape","[","-","1","]","*","embedding_size","]",")","return","(","output",",","embedding_table",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L380-L425"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"embedding_postprocessor","parameters":"(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1)","argument_list":"","return_statement":"return output","docstring":"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. 
Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.","docstring_summary":"Performs various post-processing on a word embedding tensor.","docstring_tokens":["Performs","various","post","-","processing","on","a","word","embedding","tensor","."],"function":"def embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n\n output = layer_norm_and_dropout(output, dropout_prob)\n return output","function_tokens":["def","embedding_postprocessor","(","input_tensor",",","use_token_type","=","False",",","token_type_ids","=","None",",","token_type_vocab_size","=","16",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","0.02",",","max_position_embeddings","=","512",",","dropout_prob","=","0.1",")",":","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","width","=","input_shape","[","2","]","output","=","input_tensor","if","use_token_type",":","if","token_type_ids","is","None",":","raise","ValueError","(","\"`token_type_ids` must be specified if\"","\"`use_token_type` is True.\"",")","token_type_table","=","tf",".","get_variable","(","name","=","token_type_embedding_name",",","shape","=","[","token_type_vocab_size",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# This vocab will be small so we always do one-hot here, since it is always","# faster for a small vocabulary.","flat_token_type_ids","=","tf",".","reshape","(","token_type_ids",",","[","-","1","]",")","one_hot_ids","=","tf",".","one_hot","(","flat_token_type_ids",",","depth","=","token_type_vocab_size",")","token_type_embeddings","=","tf",".","matmul","(","one_hot_ids",",","token_type_table",")","token_type_embeddings","=","tf",".","reshape","(","token_type_embeddings",",","[","batch_size",",","seq_length",",","width","]",")","output","+=","token_type_embeddings","if","use_position_embeddings",":","assert_op","=","tf",".","assert_less_equal","(","seq_length",",","max_position_embeddings",")","with","tf",".","control_dependencies","(","[","assert_op","]",")",":","full_position_embeddings","=","tf",".","get_variable","(","name","=","position_embedding_name",",","shape","=","[","max_position_embeddings",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# Since the position embedding table is a learned variable, we create it","# using a (long) sequence length `max_position_embeddings`. The actual","# sequence length might be shorter than this, for faster training of","# tasks that do not have long sequences.","#","# So `full_position_embeddings` is effectively an embedding table","# for position [0, 1, 2, ..., max_position_embeddings-1], and the current","# sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just","# perform a slice.","position_embeddings","=","tf",".","slice","(","full_position_embeddings",",","[","0",",","0","]",",","[","seq_length",",","-","1","]",")","num_dims","=","len","(","output",".","shape",".","as_list","(",")",")","# Only the last two dimensions are relevant (`seq_length` and `width`), so","# we broadcast among the first dimensions, which is typically just","# the batch size.","position_broadcast_shape","=","[","]","for","_","in","range","(","num_dims","-","2",")",":","position_broadcast_shape",".","append","(","1",")","position_broadcast_shape",".","extend","(","[","seq_length",",","width","]",")","position_embeddings","=","tf",".","reshape","(","position_embeddings",",","position_broadcast_shape",")","output","+=","position_embeddings","output","=","layer_norm_and_dropout","(","output",",","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L428-L521"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"create_attention_mask_from_input_mask","parameters":"(from_tensor, to_mask)","argument_list":"","return_statement":"return mask","docstring":"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].","docstring_summary":"Create 3D attention mask from a 2D tensor mask.","docstring_tokens":["Create","3D","attention","mask","from","a","2D","tensor","mask","."],"function":"def create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask","function_tokens":["def","create_attention_mask_from_input_mask","(","from_tensor",",","to_mask",")",":","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_shape","=","get_shape_list","(","to_mask",",","expected_rank","=","2",")","to_seq_length","=","to_shape","[","1","]","to_mask","=","tf",".","cast","(","tf",".","reshape","(","to_mask",",","[","batch_size",",","1",",","to_seq_length","]",")",",","tf",".","float32",")","# We don't assume that `from_tensor` is a mask (although it could be). We","# don't actually care if we attend *from* padding tokens (only *to* padding)","# tokens so we create a tensor of all ones.","#","# `broadcast_ones` = [batch_size, from_seq_length, 1]","broadcast_ones","=","tf",".","ones","(","shape","=","[","batch_size",",","from_seq_length",",","1","]",",","dtype","=","tf",".","float32",")","# Here we broadcast along two dimensions to create the mask.","mask","=","broadcast_ones","*","to_mask","return","mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L524-L555"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"attention_layer","parameters":"(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None)","argument_list":"","return_statement":"return context_layer","docstring":"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.","docstring_summary":"Performs multi-headed attention from `from_tensor` to `to_tensor`.","docstring_tokens":["Performs","multi","-","headed","attention","from","from_tensor","to","to_tensor","."],"function":"def attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = 
transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 \/ math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer","function_tokens":["def","attention_layer","(","from_tensor",",","to_tensor",",","attention_mask","=","None",",","num_attention_heads","=","1",",","size_per_head","=","512",",","query_act","=","None",",","key_act","=","None",",","value_act","=","None",",","attention_probs_dropout_prob","=","0.0",",","initializer_range","=","0.02",",","do_return_2d_tensor","=","False",",","batch_size","=","None",",","from_seq_length","=","None",",","to_seq_length","=","None",")",":","def","transpose_for_scores","(","input_tensor",",","batch_size",",","num_attention_heads",",","seq_length",",","width",")",":","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","batch_size",",","seq_length",",","num_attention_heads",",","width","]",")","output_tensor","=","tf",".","transpose","(","output_tensor",",","[","0",",","2",",","1",",","3","]",")","return","output_tensor","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","to_shape","=","get_shape_list","(","to_tensor",",","expected_rank","=","[","2",",","3","]",")","if","len","(","from_shape",")","!=","len","(","to_shape",")",":","raise","ValueError","(","\"The rank of `from_tensor` must match the rank of 
`to_tensor`.\"",")","if","len","(","from_shape",")","==","3",":","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_seq_length","=","to_shape","[","1","]","elif","len","(","from_shape",")","==","2",":","if","(","batch_size","is","None","or","from_seq_length","is","None","or","to_seq_length","is","None",")",":","raise","ValueError","(","\"When passing in rank 2 tensors to attention_layer, the values \"","\"for `batch_size`, `from_seq_length`, and `to_seq_length` \"","\"must all be specified.\"",")","# Scalar dimensions referenced here:","# B = batch size (number of sequences)","# F = `from_tensor` sequence length","# T = `to_tensor` sequence length","# N = `num_attention_heads`","# H = `size_per_head`","from_tensor_2d","=","reshape_to_matrix","(","from_tensor",")","to_tensor_2d","=","reshape_to_matrix","(","to_tensor",")","# `query_layer` = [B*F, N*H]","query_layer","=","tf",".","layers",".","dense","(","from_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","query_act",",","name","=","\"query\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `key_layer` = [B*T, N*H]","key_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","key_act",",","name","=","\"key\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `value_layer` = [B*T, N*H]","value_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","value_act",",","name","=","\"value\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `query_layer` = [B, N, F, H]","query_layer","=","transpose_for_scores","(","query_layer",",","batch_size",",","num_attention_heads",",","from_seq_length",",","size_per_head",")","# `key_layer` = [B, N, T, H]","key_layer","=","transpose_for_scores","(","key_layer",",","batch_size",",","num_attention_heads",",","to_seq_length",",","size_per_head",")","# Take the dot product between \"query\" and \"key\" to get the raw","# attention scores.","# `attention_scores` = [B, N, F, T]","attention_scores","=","tf",".","matmul","(","query_layer",",","key_layer",",","transpose_b","=","True",")","attention_scores","=","tf",".","multiply","(","attention_scores",",","1.0","\/","math",".","sqrt","(","float","(","size_per_head",")",")",")","if","attention_mask","is","not","None",":","# `attention_mask` = [B, 1, F, T]","attention_mask","=","tf",".","expand_dims","(","attention_mask",",","axis","=","[","1","]",")","# Since attention_mask is 1.0 for positions we want to attend and 0.0 for","# masked positions, this operation will create a tensor which is 0.0 for","# positions we want to attend and -10000.0 for masked positions.","adder","=","(","1.0","-","tf",".","cast","(","attention_mask",",","tf",".","float32",")",")","*","-","10000.0","# Since we are adding it to the raw scores before the softmax, this is","# effectively the same as removing these entirely.","attention_scores","+=","adder","# Normalize the attention scores to probabilities.","# `attention_probs` = [B, N, F, T]","attention_probs","=","tf",".","nn",".","softmax","(","attention_scores",")","# This is actually dropping out entire tokens to attend to, which might","# seem a bit unusual, but is taken from the original Transformer paper.","attention_probs","=","dropout","(","attention_probs",",","attention_probs_dropout_prob",")","# `value_layer` = 
[B, T, N, H]","value_layer","=","tf",".","reshape","(","value_layer",",","[","batch_size",",","to_seq_length",",","num_attention_heads",",","size_per_head","]",")","# `value_layer` = [B, N, T, H]","value_layer","=","tf",".","transpose","(","value_layer",",","[","0",",","2",",","1",",","3","]",")","# `context_layer` = [B, N, F, H]","context_layer","=","tf",".","matmul","(","attention_probs",",","value_layer",")","# `context_layer` = [B, F, N, H]","context_layer","=","tf",".","transpose","(","context_layer",",","[","0",",","2",",","1",",","3","]",")","if","do_return_2d_tensor",":","# `context_layer` = [B*F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size","*","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","else",":","# `context_layer` = [B, F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size",",","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","return","context_layer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L558-L751"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"transformer_model","parameters":"(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False)","argument_list":"","return_statement":"","docstring":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. 
Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.","docstring_summary":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".","docstring_tokens":["Multi","-","headed","multi","-","layer","Transformer","from","Attention","is","All","You","Need","."],"function":"def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size \/ num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on\n # the GPU\/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output","function_tokens":["def","transformer_model","(","input_tensor",",","attention_mask","=","None",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","intermediate_act_fn","=","gelu",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","initializer_range","=","0.02",",","do_return_all_layers","=","False",")",":","if","hidden_size","%","num_attention_heads","!=","0",":","raise","ValueError","(","\"The hidden size (%d) is not a multiple of the number of attention \"","\"heads 
(%d)\"","%","(","hidden_size",",","num_attention_heads",")",")","attention_head_size","=","int","(","hidden_size","\/","num_attention_heads",")","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","input_width","=","input_shape","[","2","]","# The Transformer performs sum residuals on all layers so the input needs","# to be the same as the hidden size.","if","input_width","!=","hidden_size",":","raise","ValueError","(","\"The width of the input tensor (%d) != hidden size (%d)\"","%","(","input_width",",","hidden_size",")",")","# We keep the representation as a 2D tensor to avoid re-shaping it back and","# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on","# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to","# help the optimizer.","prev_output","=","reshape_to_matrix","(","input_tensor",")","all_layer_outputs","=","[","]","for","layer_idx","in","range","(","num_hidden_layers",")",":","with","tf",".","variable_scope","(","\"layer_%d\"","%","layer_idx",")",":","layer_input","=","prev_output","with","tf",".","variable_scope","(","\"attention\"",")",":","attention_heads","=","[","]","with","tf",".","variable_scope","(","\"self\"",")",":","attention_head","=","attention_layer","(","from_tensor","=","layer_input",",","to_tensor","=","layer_input",",","attention_mask","=","attention_mask",",","num_attention_heads","=","num_attention_heads",",","size_per_head","=","attention_head_size",",","attention_probs_dropout_prob","=","attention_probs_dropout_prob",",","initializer_range","=","initializer_range",",","do_return_2d_tensor","=","True",",","batch_size","=","batch_size",",","from_seq_length","=","seq_length",",","to_seq_length","=","seq_length",")","attention_heads",".","append","(","attention_head",")","attention_output","=","None","if","len","(","attention_heads",")","==","1",":","attention_output","=","attention_heads","[","0","]","else",":","# In the case where we have other sequences, we just concatenate","# them to the self-attention head before the projection.","attention_output","=","tf",".","concat","(","attention_heads",",","axis","=","-","1",")","# Run a linear projection of `hidden_size` then add a residual","# with `layer_input`.","with","tf",".","variable_scope","(","\"output\"",")",":","attention_output","=","tf",".","layers",".","dense","(","attention_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","attention_output","=","dropout","(","attention_output",",","hidden_dropout_prob",")","attention_output","=","layer_norm","(","attention_output","+","layer_input",")","# The activation is only applied to the \"intermediate\" hidden layer.","with","tf",".","variable_scope","(","\"intermediate\"",")",":","intermediate_output","=","tf",".","layers",".","dense","(","attention_output",",","intermediate_size",",","activation","=","intermediate_act_fn",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# Down-project back to `hidden_size` then add the 
residual.","with","tf",".","variable_scope","(","\"output\"",")",":","layer_output","=","tf",".","layers",".","dense","(","intermediate_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","layer_output","=","dropout","(","layer_output",",","hidden_dropout_prob",")","layer_output","=","layer_norm","(","layer_output","+","attention_output",")","prev_output","=","layer_output","all_layer_outputs",".","append","(","layer_output",")","if","do_return_all_layers",":","final_outputs","=","[","]","for","layer_output","in","all_layer_outputs",":","final_output","=","reshape_from_matrix","(","layer_output",",","input_shape",")","final_outputs",".","append","(","final_output",")","return","final_outputs","else",":","final_output","=","reshape_from_matrix","(","prev_output",",","input_shape",")","return","final_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L754-L892"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"get_shape_list","parameters":"(tensor, expected_rank=None, name=None)","argument_list":"","return_statement":"return shape","docstring":"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.","docstring_summary":"Returns a list of the shape of tensor, preferring static dimensions.","docstring_tokens":["Returns","a","list","of","the","shape","of","tensor","preferring","static","dimensions","."],"function":"def get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape","function_tokens":["def","get_shape_list","(","tensor",",","expected_rank","=","None",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","if","expected_rank","is","not","None",":","assert_rank","(","tensor",",","expected_rank",",","name",")","shape","=","tensor",".","shape",".","as_list","(",")","non_static_indexes","=","[","]","for","(","index",",","dim",")","in","enumerate","(","shape",")",":","if","dim","is","None",":","non_static_indexes",".","append","(","index",")","if","not","non_static_indexes",":","return","shape","dyn_shape","=","tf",".","shape","(","tensor",")","for","index","in","non_static_indexes",":","shape","[","index","]","=","dyn_shape","[","index","]","return","shape"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L895-L929"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"reshape_to_matrix","parameters":"(input_tensor)","argument_list":"","return_statement":"return output_tensor","docstring":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_summary":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_tokens":["Reshapes","a",">","=","rank","2","tensor","to","a","rank","2","tensor","(","i",".","e",".","a","matrix",")","."],"function":"def reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor","function_tokens":["def","reshape_to_matrix","(","input_tensor",")",":","ndims","=","input_tensor",".","shape",".","ndims","if","ndims","<","2",":","raise","ValueError","(","\"Input tensor must have at least rank 2. 
Shape = %s\"","%","(","input_tensor",".","shape",")",")","if","ndims","==","2",":","return","input_tensor","width","=","input_tensor",".","shape","[","-","1","]","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","-","1",",","width","]",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L932-L943"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"reshape_from_matrix","parameters":"(output_tensor, orig_shape_list)","argument_list":"","return_statement":"return tf.reshape(output_tensor, orig_dims + [width])","docstring":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_summary":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_tokens":["Reshapes","a","rank","2","tensor","back","to","its","original","rank",">","=","2","tensor","."],"function":"def reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])","function_tokens":["def","reshape_from_matrix","(","output_tensor",",","orig_shape_list",")",":","if","len","(","orig_shape_list",")","==","2",":","return","output_tensor","output_shape","=","get_shape_list","(","output_tensor",")","orig_dims","=","orig_shape_list","[","0",":","-","1","]","width","=","output_shape","[","-","1","]","return","tf",".","reshape","(","output_tensor",",","orig_dims","+","[","width","]",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L946-L956"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"assert_rank","parameters":"(tensor, expected_rank, name=None)","argument_list":"","return_statement":"","docstring":"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.","docstring_summary":"Raises an exception if the tensor rank is not of the expected rank.","docstring_tokens":["Raises","an","exception","if","the","tensor","rank","is","not","of","the","expected","rank","."],"function":"def assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = 
tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))","function_tokens":["def","assert_rank","(","tensor",",","expected_rank",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","expected_rank_dict","=","{","}","if","isinstance","(","expected_rank",",","six",".","integer_types",")",":","expected_rank_dict","[","expected_rank","]","=","True","else",":","for","x","in","expected_rank",":","expected_rank_dict","[","x","]","=","True","actual_rank","=","tensor",".","shape",".","ndims","if","actual_rank","not","in","expected_rank_dict",":","scope_name","=","tf",".","get_variable_scope","(",")",".","name","raise","ValueError","(","\"For the tensor `%s` in scope `%s`, the actual rank \"","\"`%d` (shape = %s) is not equal to the expected rank `%s`\"","%","(","name",",","scope_name",",","actual_rank",",","str","(","tensor",".","shape",")",",","str","(","expected_rank",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L959-L986"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.__init__","parameters":"(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range","function_tokens":["def","__init__","(","self",",","vocab_size",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","16",",","initializer_range","=","0.02",")",":","self",".","vocab_size","=","vocab_size","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L34-L80"} 
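The shape utilities captured in the records above (get_shape_list, reshape_to_matrix, reshape_from_matrix, assert_rank) compose into the 2D round-trip that transformer_model's comments describe. A minimal usage sketch, assuming TensorFlow 1.x in graph mode and that modeling.py from baselines/models/bert_wwm_ext/ is on the import path; the placeholder shape is purely illustrative:

import tensorflow as tf  # TF 1.x, matching the tf.variable_scope / tf.gfile calls above
import modeling          # assumed importable from baselines/models/bert_wwm_ext/

x = tf.placeholder(tf.float32, shape=[None, 128, 768])

# Static dims come back as Python ints; the dynamic batch dim comes back
# as a scalar tf.Tensor, as the get_shape_list docstring promises.
orig_shape = modeling.get_shape_list(x, expected_rank=3)

flat = modeling.reshape_to_matrix(x)                       # [batch * 128, 768]
restored = modeling.reshape_from_matrix(flat, orig_shape)  # [batch, 128, 768]

# assert_rank raises ValueError on a mismatch, naming the tensor and scope.
modeling.assert_rank(restored, expected_rank=3)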
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size","=","None",")","for","(","key",",","value",")","in","six",".","iteritems","(","json_object",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L83-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","tf",".","gfile",".","GFile","(","json_file",",","\"r\"",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L91-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L97-L100"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"BertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L102-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"BertModel.__init__","parameters":"(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None)","argument_list":"","return_statement":"","docstring":"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.","docstring_summary":"Constructor for BertModel.","docstring_tokens":["Constructor","for","BertModel","."],"function":"def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (self.embedding_output, self.embedding_table) = embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n\n self.sequence_output = self.all_encoder_layers[-1]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.variable_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. 
We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = tf.layers.dense(\n first_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(config.initializer_range))","function_tokens":["def","__init__","(","self",",","config",",","is_training",",","input_ids",",","input_mask","=","None",",","token_type_ids","=","None",",","use_one_hot_embeddings","=","False",",","scope","=","None",")",":","config","=","copy",".","deepcopy","(","config",")","if","not","is_training",":","config",".","hidden_dropout_prob","=","0.0","config",".","attention_probs_dropout_prob","=","0.0","input_shape","=","get_shape_list","(","input_ids",",","expected_rank","=","2",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","if","input_mask","is","None",":","input_mask","=","tf",".","ones","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","if","token_type_ids","is","None",":","token_type_ids","=","tf",".","zeros","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","with","tf",".","variable_scope","(","scope",",","default_name","=","\"bert\"",")",":","with","tf",".","variable_scope","(","\"embeddings\"",")",":","# Perform embedding lookup on the word ids.","(","self",".","embedding_output",",","self",".","embedding_table",")","=","embedding_lookup","(","input_ids","=","input_ids",",","vocab_size","=","config",".","vocab_size",",","embedding_size","=","config",".","hidden_size",",","initializer_range","=","config",".","initializer_range",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# Add positional embeddings and token type embeddings, then layer","# normalize and perform dropout.","self",".","embedding_output","=","embedding_postprocessor","(","input_tensor","=","self",".","embedding_output",",","use_token_type","=","True",",","token_type_ids","=","token_type_ids",",","token_type_vocab_size","=","config",".","type_vocab_size",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","config",".","initializer_range",",","max_position_embeddings","=","config",".","max_position_embeddings",",","dropout_prob","=","config",".","hidden_dropout_prob",")","with","tf",".","variable_scope","(","\"encoder\"",")",":","# This converts a 2D mask of shape [batch_size, seq_length] to a 3D","# mask of shape [batch_size, seq_length, seq_length] which is used","# for the attention scores.","attention_mask","=","create_attention_mask_from_input_mask","(","input_ids",",","input_mask",")","# Run the stacked transformer.","# `sequence_output` shape = [batch_size, seq_length, 
hidden_size].","self",".","all_encoder_layers","=","transformer_model","(","input_tensor","=","self",".","embedding_output",",","attention_mask","=","attention_mask",",","hidden_size","=","config",".","hidden_size",",","num_hidden_layers","=","config",".","num_hidden_layers",",","num_attention_heads","=","config",".","num_attention_heads",",","intermediate_size","=","config",".","intermediate_size",",","intermediate_act_fn","=","get_activation","(","config",".","hidden_act",")",",","hidden_dropout_prob","=","config",".","hidden_dropout_prob",",","attention_probs_dropout_prob","=","config",".","attention_probs_dropout_prob",",","initializer_range","=","config",".","initializer_range",",","do_return_all_layers","=","True",")","self",".","sequence_output","=","self",".","all_encoder_layers","[","-","1","]","# The \"pooler\" converts the encoded sequence tensor of shape","# [batch_size, seq_length, hidden_size] to a tensor of shape","# [batch_size, hidden_size]. This is necessary for segment-level","# (or segment-pair-level) classification tasks where we need a fixed","# dimensional representation of the segment.","with","tf",".","variable_scope","(","\"pooler\"",")",":","# We \"pool\" the model by simply taking the hidden state corresponding","# to the first token. We assume that this has been pre-trained","first_token_tensor","=","tf",".","squeeze","(","self",".","sequence_output","[",":",",","0",":","1",",",":","]",",","axis","=","1",")","self",".","pooled_output","=","tf",".","layers",".","dense","(","first_token_tensor",",","config",".","hidden_size",",","activation","=","tf",".","tanh",",","kernel_initializer","=","create_initializer","(","config",".","initializer_range",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L131-L232"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"BertModel.get_sequence_output","parameters":"(self)","argument_list":"","return_statement":"return self.sequence_output","docstring":"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.","docstring_summary":"Gets final hidden layer of encoder.","docstring_tokens":["Gets","final","hidden","layer","of","encoder","."],"function":"def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output","function_tokens":["def","get_sequence_output","(","self",")",":","return","self",".","sequence_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L237-L244"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/modeling.py","language":"python","identifier":"BertModel.get_embedding_output","parameters":"(self)","argument_list":"","return_statement":"return self.embedding_output","docstring":"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional 
embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.","docstring_summary":"Gets output of the embedding lookup (i.e., input to the transformer).","docstring_tokens":["Gets","output","of","the","embedding","lookup","(","i",".","e",".","input","to","the","transformer",")","."],"function":"def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output","function_tokens":["def","get_embedding_output","(","self",")",":","return","self",".","embedding_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/modeling.py#L249-L258"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"convert_example_list_for_inews","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature_list","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_example_list_for_inews(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return [InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)]\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n must_len = len(tokens_a) + 3\n extra_len = max_seq_length - must_len\n feature_list = []\n if example.text_b and extra_len > 0:\n extra_num = int((len(tokens_b) - 1) \/ extra_len) + 1\n for num in range(extra_num):\n max_len = min((num + 1) * extra_len, len(tokens_b))\n tokens_b_sub = tokens_b[num * extra_len: max_len]\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b_sub, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n else:\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n return 
feature_list","function_tokens":["def","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","[","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","]","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","must_len","=","len","(","tokens_a",")","+","3","extra_len","=","max_seq_length","-","must_len","feature_list","=","[","]","if","example",".","text_b","and","extra_len",">","0",":","extra_num","=","int","(","(","len","(","tokens_b",")","-","1",")","\/","extra_len",")","+","1","for","num","in","range","(","extra_num",")",":","max_len","=","min","(","(","num","+","1",")","*","extra_len",",","len","(","tokens_b",")",")","tokens_b_sub","=","tokens_b","[","num","*","extra_len",":","max_len","]","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b_sub",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","else",":","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","return","feature_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L233-L268"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features_for_inews","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features_for_inews(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n num_example = 0\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature_list = convert_example_list_for_inews(ex_index, example, label_list,\n max_seq_length, tokenizer)\n num_example += len(feature_list)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n for feature in feature_list:\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n 
features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"feature num: %s\", num_example)\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features_for_inews","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","num_example","=","0","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","1000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature_list","=","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","num_example","+=","len","(","feature_list",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","for","feature","in","feature_list",":","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","tf",".","logging",".","info","(","\"feature num: %s\"",",","num_example",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L271-L301"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"convert_single_example","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that 
the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return 
feature","function_tokens":["def","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","max_seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","max_seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","label_id","=","label_map","[","example",".","label","]","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","tf",".","logging",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label_id",")",")","feature","=","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",",","is_real_example","=","True",")","return","feature"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L304-L403"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n 
writer.close()","function_tokens":["def","file_based_convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L406-L433"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"file_based_input_fn_builder","parameters":"(input_file, seq_length, is_training,\n drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = 
tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","file_based_input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"label_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"is_real_example\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","}","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L436-L481"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L484-L498"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, 
probabilities)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use model.get_sequence_output()","# instead.","output_layer","=","model",".","get_pooled_output","(",")","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L501-L543"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if 
\"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","num_labels",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_real_example","=","None","if","\"is_real_example\"","in","features",":","is_real_example","=","tf",".","cast","(","features","[","\"is_real_example\"","]",",","dtype","=","tf",".","float32",")","else",":","is_real_example","=","tf",".","ones","(","tf",".","shape","(","label_ids",")",",","dtype","=","tf",".","float32",")","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",",","is_real_example",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","label_ids",",","predictions","=","predictions",",","weights","=","is_real_example",")","loss","=","tf",".","metrics",".","mean","(","values","=","per_example_loss",",","weights","=","is_real_example",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits",",","is_real_example","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L546-L635"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",",","is_training",",","drop_remainder",")",":","all_input_ids","=","[","]","all_input_mask","=","[","]","all_segment_ids","=","[","]","all_label_ids","=","[","]","for","feature","in","features",":","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_segment_ids",".","append","(","feature",".","segment_ids",")","all_label_ids",".","append","(","feature",".","label_id",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"segment_ids\"",":","tf",".","constant","(","all_segment_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"label_ids\"",":","tf",".","constant","(","all_label_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","}",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L640-L689"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return features","docstring":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_summary":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","list","of","InputFeatures","."],"function":"def convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if 
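The `input_fn_builder` record loads every feature into one in-memory constant via `Dataset.from_tensor_slices`, which is explicitly demo-only. A condensed sketch of the same pattern, with only two hypothetical features and the simplification of tying `drop_remainder` to `is_training` (the original passes it separately):

```python
import tensorflow as tf  # from_tensor_slices exists in both TF 1.x and 2.x

def make_in_memory_dataset(all_input_ids, all_label_ids, batch_size,
                           is_training):
    """Tiny analogue of input_fn_builder: everything lives in one constant."""
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids": tf.constant(all_input_ids, dtype=tf.int32),
        "label_ids": tf.constant(all_label_ids, dtype=tf.int32),
    })
    if is_training:
        d = d.repeat()
        d = d.shuffle(buffer_size=100)
    return d.batch(batch_size, drop_remainder=is_training)
```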
ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","features",".","append","(","feature",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier.py#L694-L707"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/optimization.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","AdamWeightDecayOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/optimization.py#L25-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/optimization.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = 
(\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/optimization.py#L108-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/optimization.py#L159-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/optimization.py#L169-L174"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"write_instance_to_example_files","parameters":"(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, 
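The `apply_gradients` record above implements decoupled weight decay: the L2 term is added to the Adam update itself, after the m/v normalization, rather than to the loss, and `_do_use_weight_decay` exempts LayerNorm and bias parameters by regex. One update step in NumPy with hypothetical values (note there is no bias correction, matching the record):

```python
import re
import numpy as np

beta_1, beta_2, eps, lr, wd = 0.9, 0.999, 1e-6, 1e-4, 0.01
exclude = ["LayerNorm", "layer_norm", "bias"]

def adamw_step(param, grad, m, v, param_name):
    m = beta_1 * m + (1.0 - beta_1) * grad        # first moment
    v = beta_2 * v + (1.0 - beta_2) * grad ** 2   # second moment
    update = m / (np.sqrt(v) + eps)
    # Decoupled weight decay: applied to the update, not the loss,
    # and skipped for excluded parameters (LayerNorm / bias).
    if not any(re.search(r, param_name) for r in exclude):
        update += wd * param
    return param - lr * update, m, v

w = np.ones(3)
g = np.full(3, 0.1)
w, m, v = adamw_step(w, g, np.zeros(3), np.zeros(3), "encoder/kernel")
```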
output_files)","argument_list":"","return_statement":"","docstring":"Create TF example files from `TrainingInstance`s.","docstring_summary":"Create TF example files from `TrainingInstance`s.","docstring_tokens":["Create","TF","example","files","from","TrainingInstance","s","."],"function":"def write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", 
total_written)","function_tokens":["def","write_instance_to_example_files","(","instances",",","tokenizer",",","max_seq_length",",","max_predictions_per_seq",",","output_files",")",":","writers","=","[","]","for","output_file","in","output_files",":","writers",".","append","(","tf",".","python_io",".","TFRecordWriter","(","output_file",")",")","writer_index","=","0","total_written","=","0","for","(","inst_index",",","instance",")","in","enumerate","(","instances",")",":","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","tokens",")","input_mask","=","[","1","]","*","len","(","input_ids",")","segment_ids","=","list","(","instance",".","segment_ids",")","assert","len","(","input_ids",")","<=","max_seq_length","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","masked_lm_positions","=","list","(","instance",".","masked_lm_positions",")","masked_lm_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","masked_lm_labels",")","masked_lm_weights","=","[","1.0","]","*","len","(","masked_lm_ids",")","while","len","(","masked_lm_positions",")","<","max_predictions_per_seq",":","masked_lm_positions",".","append","(","0",")","masked_lm_ids",".","append","(","0",")","masked_lm_weights",".","append","(","0.0",")","next_sentence_label","=","1","if","instance",".","is_random_next","else","0","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","segment_ids",")","features","[","\"masked_lm_positions\"","]","=","create_int_feature","(","masked_lm_positions",")","features","[","\"masked_lm_ids\"","]","=","create_int_feature","(","masked_lm_ids",")","features","[","\"masked_lm_weights\"","]","=","create_float_feature","(","masked_lm_weights",")","features","[","\"next_sentence_labels\"","]","=","create_int_feature","(","[","next_sentence_label","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writers","[","writer_index","]",".","write","(","tf_example",".","SerializeToString","(",")",")","writer_index","=","(","writer_index","+","1",")","%","len","(","writers",")","total_written","+=","1","if","inst_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","instance",".","tokens","]",")",")","for","feature_name","in","features",".","keys","(",")",":","feature","=","features","[","feature_name","]","values","=","[","]","if","feature",".","int64_list",".","value",":","values","=","feature",".","int64_list",".","value","elif","feature",".","float_list",".","value",":","values","=","feature",".","float_list",".","value","tf",".","logging",".","info","(","\"%s: %s\"","%","(","feature_name",",","\" \"",".","join","(","[","str","(","x",")","for","x","in","values","]",")",")",")","for","writer","in","writers",":","writer",".","close","(",")","tf",".","logging",".","info","(","\"Wrote %d total 
instances\"",",","total_written",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/create_pretraining_data.py#L96-L166"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"create_training_instances","parameters":"(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng)","argument_list":"","return_statement":"return instances","docstring":"Create `TrainingInstance`s from raw text.","docstring_summary":"Create `TrainingInstance`s from raw text.","docstring_tokens":["Create","TrainingInstance","s","from","raw","text","."],"function":"def create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng))\n\n rng.shuffle(instances)\n return instances","function_tokens":["def","create_training_instances","(","input_files",",","tokenizer",",","max_seq_length",",","dupe_factor",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","rng",")",":","all_documents","=","[","[","]","]","# Input file format:","# (1) One sentence per line. These should ideally be actual sentences, not","# entire paragraphs or arbitrary spans of text. (Because we use the","# sentence boundaries for the \"next sentence prediction\" task).","# (2) Blank lines between documents. 
Document boundaries are needed so","# that the \"next sentence prediction\" task doesn't span between documents.","for","input_file","in","input_files",":","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","# Empty lines are used as document delimiters","if","not","line",":","all_documents",".","append","(","[","]",")","tokens","=","tokenizer",".","tokenize","(","line",")","if","tokens",":","all_documents","[","-","1","]",".","append","(","tokens",")","# Remove empty documents","all_documents","=","[","x","for","x","in","all_documents","if","x","]","rng",".","shuffle","(","all_documents",")","vocab_words","=","list","(","tokenizer",".","vocab",".","keys","(",")",")","instances","=","[","]","for","_","in","range","(","dupe_factor",")",":","for","document_index","in","range","(","len","(","all_documents",")",")",":","instances",".","extend","(","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",")","rng",".","shuffle","(","instances",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/create_pretraining_data.py#L179-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document","parameters":"(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. 
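The `create_training_instances` record expects one sentence per line with blank lines as document delimiters, and drops empty documents afterwards. A pure-Python reader implementing that contract, with `str.split` standing in for the real tokenizer:

```python
def read_documents(lines):
    """Group tokenized sentences into documents; blank lines split documents."""
    all_documents = [[]]
    for line in lines:
        line = line.strip()
        if not line:                  # blank line -> start a new document
            all_documents.append([])
        else:
            all_documents[-1].append(line.split())  # stand-in tokenizer
    return [doc for doc in all_documents if doc]    # drop empty documents

docs = read_documents(["Hello world .", "Second sentence .", "", "New doc ."])
# -> two documents: one with two sentences, one with a single sentence
```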
Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document):\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances","function_tokens":["def","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. 
However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","instances","=","[","]","current_chunk","=","[","]","current_length","=","0","i","=","0","while","i","<","len","(","document",")",":","segment","=","document","[","i","]","current_chunk",".","append","(","segment",")","current_length","+=","len","(","segment",")","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","if","current_chunk",":","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","tokens_b","=","[","]","# Random next","is_random_next","=","False","if","len","(","current_chunk",")","==","1","or","rng",".","random","(",")","<","0.5",":","is_random_next","=","True","target_b_length","=","target_seq_length","-","len","(","tokens_a",")","# This should rarely go for more than one iteration for large","# corpora. 
However, just to be careful, we try to make sure that","# the random document is not the same as the document","# we're processing.","for","_","in","range","(","10",")",":","random_document_index","=","rng",".","randint","(","0",",","len","(","all_documents",")","-","1",")","if","random_document_index","!=","document_index",":","break","random_document","=","all_documents","[","random_document_index","]","random_start","=","rng",".","randint","(","0",",","len","(","random_document",")","-","1",")","for","j","in","range","(","random_start",",","len","(","random_document",")",")",":","tokens_b",".","extend","(","random_document","[","j","]",")","if","len","(","tokens_b",")",">=","target_b_length",":","break","# We didn't actually use these segments so we \"put them back\" so","# they don't go to waste.","num_unused_segments","=","len","(","current_chunk",")","-","a_end","i","-=","num_unused_segments","# Actual next","else",":","is_random_next","=","False","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","current_length","=","0","i","+=","1","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/create_pretraining_data.py#L223-L335"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"create_masked_lm_predictions","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an 
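Two random choices drive `create_instances_from_document`: with probability `short_seq_prob` the target length is shrunk to fight the pretrain/finetune length mismatch, and segment B comes from a random document half the time (always, when the current chunk has a single segment). Those decisions in isolation, with illustrative constants:

```python
import random

rng = random.Random(12345)
max_num_tokens = 128 - 3          # room for [CLS], [SEP], [SEP]
short_seq_prob = 0.1

target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
    target_seq_length = rng.randint(2, max_num_tokens)

num_segments_in_chunk = 3         # illustrative chunk size
is_random_next = (num_segments_in_chunk == 1 or rng.random() < 0.5)
```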
original word. When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. When a word has been split into","# WordPieces, the first token does not have any marker and any subsequence","# tokens are prefixed with ##. 
So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","output_tokens","=","list","(","tokens",")","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","masked_token","=","tokens","[","index","]","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/create_pretraining_data.py#L342-L415"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/create_pretraining_data.py","language":"python","identifier":"truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_num_tokens, rng)","argument_list":"","return_statement":"","docstring":"Truncates a pair of sequences to a maximum sequence length.","docstring_summary":"Truncates a pair of sequences to a maximum sequence length.","docstring_tokens":["Truncates","a","pair","of","sequences","to","a","maximum","sequence","length","."],"function":"def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else 
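The `create_masked_lm_predictions` record groups WordPieces into whole-word candidates by the `##` continuation prefix, then replaces each chosen position with `[MASK]` 80% of the time, keeps it 10% of the time, and draws a random vocabulary word the remaining 10%. The grouping and the replacement draw, reduced to their essentials (the shuffle, the `FLAGS.do_whole_word_mask` gate, and the `num_to_predict` cap are omitted here):

```python
import random

rng = random.Random(0)
tokens = ["[CLS]", "un", "##afford", "##able", "prices", "[SEP]"]

cand_indexes = []
for i, token in enumerate(tokens):
    if token in ("[CLS]", "[SEP]"):
        continue
    if cand_indexes and token.startswith("##"):
        cand_indexes[-1].append(i)   # continuation piece joins previous word
    else:
        cand_indexes.append([i])
# -> [[1, 2, 3], [4]]: "un ##afford ##able" is one whole-word candidate

def draw_replacement(original, vocab_words):
    if rng.random() < 0.8:
        return "[MASK]"              # 80%: mask
    if rng.random() < 0.5:
        return original              # 10%: keep original token
    return vocab_words[rng.randint(0, len(vocab_words) - 1)]  # 10%: random
```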
tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()","function_tokens":["def","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")",":","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_num_tokens",":","break","trunc_tokens","=","tokens_a","if","len","(","tokens_a",")",">","len","(","tokens_b",")","else","tokens_b","assert","len","(","trunc_tokens",")",">=","1","# We want to sometimes truncate from the front and sometimes from the","# back to add more randomness and avoid biases.","if","rng",".","random","(",")","<","0.5",":","del","trunc_tokens","[","0","]","else",":","trunc_tokens",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/create_pretraining_data.py#L418-L433"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_ner.py","language":"python","identifier":"InputExample.__init__","parameters":"(self, guid, text, label=None)","argument_list":"","return_statement":"","docstring":"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.","docstring_summary":"Constructs a InputExample.","docstring_tokens":["Constructs","a","InputExample","."],"function":"def __init__(self, guid, text, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. 
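`truncate_seq_pair` above always shrinks the longer of the two token lists, deleting from the front or the back at random to avoid positional bias. A runnable toy demonstration of the same strategy:

```python
# Toy run of truncate_seq_pair: repeatedly shrink the longer list, choosing
# front or back at random, until the pair fits in max_num_tokens.
import random

def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc_tokens) >= 1
        if rng.random() < 0.5:
            del trunc_tokens[0]   # truncate from the front
        else:
            trunc_tokens.pop()    # truncate from the back

a, b = list("abcdefgh"), list("xyz")
truncate_seq_pair(a, b, 6, random.Random(0))
print(a, b)  # the two lists now sum to 6 tokens
```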
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.label = label","function_tokens":["def","__init__","(","self",",","guid",",","text",",","label","=","None",")",":","self",".","guid","=","guid","self",".","text","=","text","self",".","label","=","label"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_ner.py#L123-L135"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_ner.py#L152-L154"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_ner.py#L156-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_ner.py#L160-L162"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_ner.py","language":"python","identifier":"DataProcessor._read_data","parameters":"(cls, input_file)","argument_list":"","return_statement":"","docstring":"Reads a BIO data.","docstring_summary":"Reads a BIO 
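The `DataProcessor` records above only define an abstract interface. A hypothetical minimal subclass, purely for illustration (`ToyProcessor` does not exist in the repo; the label set is invented):

```python
# Hypothetical minimal DataProcessor subclass showing how the abstract
# methods above are meant to be filled in.
class DataProcessor(object):
    def get_train_examples(self, data_dir):
        raise NotImplementedError()
    def get_dev_examples(self, data_dir):
        raise NotImplementedError()
    def get_labels(self):
        raise NotImplementedError()

class InputExample(object):
    def __init__(self, guid, text, label=None):
        self.guid, self.text, self.label = guid, text, label

class ToyProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return [InputExample("train-0", "some text", "B-LOC O")]
    def get_dev_examples(self, data_dir):
        return [InputExample("dev-0", "other text", "O O")]
    def get_labels(self):
        return ["B-LOC", "I-LOC", "O"]

print(ToyProcessor().get_labels())
```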
data.","docstring_tokens":["Reads","a","BIO","data","."],"function":"def _read_data(cls, input_file):\n \"\"\"Reads a BIO data.\"\"\"\n with open(input_file) as f:\n lines = []\n words = []\n labels = []\n for line in f:\n contends = line.strip()\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n if contends.startswith(\"-DOCSTART-\"):\n words.append('')\n continue\n if len(contends) == 0 and words[-1] == '.':\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append([l, w])\n words = []\n labels = []\n continue\n if len(contends) == 0:\n continue\n words.append(word)\n labels.append(label)\n return lines","function_tokens":["def","_read_data","(","cls",",","input_file",")",":","with","open","(","input_file",")","as","f",":","lines","=","[","]","words","=","[","]","labels","=","[","]","for","line","in","f",":","contends","=","line",".","strip","(",")","word","=","line",".","strip","(",")",".","split","(","' '",")","[","0","]","label","=","line",".","strip","(",")",".","split","(","' '",")","[","-","1","]","if","contends",".","startswith","(","\"-DOCSTART-\"",")",":","words",".","append","(","''",")","continue","if","len","(","contends",")","==","0","and","words","[","-","1","]","==","'.'",":","l","=","' '",".","join","(","[","label","for","label","in","labels","if","len","(","label",")",">","0","]",")","w","=","' '",".","join","(","[","word","for","word","in","words","if","len","(","word",")",">","0","]",")","lines",".","append","(","[","l",",","w","]",")","words","=","[","]","labels","=","[","]","continue","if","len","(","contends",")","==","0",":","continue","words",".","append","(","word",")","labels",".","append","(","label",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_ner.py#L165-L189"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/extract_features.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
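`_read_data` above expects one `word label` pair per line in BIO format and flushes a sentence when it hits a blank line after a `.` token. A self-contained re-implementation of that logic run on a toy CoNLL-style snippet:

```python
# Re-implementation of the _read_data parsing loop on toy BIO input; a
# guard against an empty `words` list is added for the standalone demo.
import io

def read_bio(f):
    lines, words, labels = [], [], []
    for line in f:
        contends = line.strip()
        if contends.startswith("-DOCSTART-"):
            words.append('')
            continue
        if len(contends) == 0:
            if words and words[-1] == '.':
                lines.append([' '.join(l for l in labels if len(l) > 0),
                              ' '.join(w for w in words if len(w) > 0)])
                words, labels = [], []
            continue
        words.append(contends.split(' ')[0])
        labels.append(contends.split(' ')[-1])
    return lines

sample = "EU B-ORG\nrejects O\nit O\n. O\n\n"
print(read_bio(io.StringIO(sample)))  # [['B-ORG O O O', 'EU rejects it .']]
```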
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(\n all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",")",":","all_unique_ids","=","[","]","all_input_ids","=","[","]","all_input_mask","=","[","]","all_input_type_ids","=","[","]","for","feature","in","features",":","all_unique_ids",".","append","(","feature",".","unique_id",")","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_input_type_ids",".","append","(","feature",".","input_type_ids",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"unique_ids\"",":","tf",".","constant","(","all_unique_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_type_ids\"",":","tf",".","constant","(","all_input_type_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","}",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","False",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/extract_features.py#L100-L145"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/extract_features.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(\n 
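`input_fn_builder` in extract_features.py materializes the whole dataset in memory with `from_tensor_slices`, as its own comment warns. A toy-shaped sketch of the same pipeline (shapes are invented; the real code pulls the batch size from `params["batch_size"]`):

```python
# Sketch of the in-memory tf.data pipeline built above: whole-dataset
# tensors are sliced into examples and batched without dropping the tail.
import numpy as np
import tensorflow as tf

num_examples, seq_length = 4, 8
d = tf.data.Dataset.from_tensor_slices({
    "unique_ids": np.arange(num_examples, dtype=np.int32),
    "input_ids": np.zeros((num_examples, seq_length), dtype=np.int32),
    "input_mask": np.ones((num_examples, seq_length), dtype=np.int32),
    "input_type_ids": np.zeros((num_examples, seq_length), dtype=np.int32),
})
d = d.batch(2, drop_remainder=False)
```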
config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map,\n initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\n \"unique_id\": unique_ids,\n }\n\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","layer_indexes",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","input_type_ids","=","features","[","\"input_type_ids\"","]","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","False",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","input_type_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","if","mode","!=","tf",".","estimator",".","ModeKeys",".","PREDICT",":","raise","ValueError","(","\"Only PREDICT modes are supported: %s\"","%","(","mode",")",")","tvars","=","tf",".","trainable_variables","(",")","scaffold_fn","=","None","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
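The extract-features `model_fn` above runs in PREDICT mode only and exposes one output tensor per requested encoder layer. A tiny sketch of how `layer_indexes` (for example, the last four layers) map to `layer_output_%d` prediction keys, with placeholder strings in place of `model.get_all_encoder_layers()`:

```python
# Sketch of the layer_indexes -> predictions mapping; `all_layers` is a
# toy stand-in for the list of encoder-layer activations.
layer_indexes = [-1, -2, -3, -4]
all_layers = ["layer%d" % i for i in range(12)]  # placeholder activations

predictions = {"unique_id": [0, 1]}
for i, layer_index in enumerate(layer_indexes):
    predictions["layer_output_%d" % i] = all_layers[layer_index]

print(sorted(k for k in predictions if k.startswith("layer_output")))
```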
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","all_layers","=","model",".","get_all_encoder_layers","(",")","predictions","=","{","\"unique_id\"",":","unique_ids",",","}","for","(","i",",","layer_index",")","in","enumerate","(","layer_indexes",")",":","predictions","[","\"layer_output_%d\"","%","i","]","=","all_layers","[","layer_index","]","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/extract_features.py#L148-L207"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/extract_features.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, seq_length, tokenizer)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(\n InputFeatures(\n unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","input_type_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","input_type_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
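`convert_examples_to_features` lays tokens out as `[CLS] a [SEP] b [SEP]`, assigns type ids 0/1 to the two segments, and zero-pads ids, mask, and type ids out to `seq_length`. A standalone sketch with a toy vocabulary in place of the real WordPiece lookup:

```python
# Standalone sketch of the [CLS]/[SEP] layout, segment type ids, and
# zero-padding above; `vocab` is a toy token->id dict, not a real vocab.
def build_features(tokens_a, tokens_b, seq_length, vocab):
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    input_type_ids = [0] * len(tokens)
    if tokens_b:
        tokens += tokens_b + ["[SEP]"]
        input_type_ids += [1] * (len(tokens_b) + 1)
    input_ids = [vocab[t] for t in tokens]
    input_mask = [1] * len(input_ids)   # 1 = real token, 0 = padding
    while len(input_ids) < seq_length:  # zero-pad up to the sequence length
        input_ids.append(0)
        input_mask.append(0)
        input_type_ids.append(0)
    return tokens, input_ids, input_mask, input_type_ids

vocab = {"[CLS]": 101, "[SEP]": 102, "the": 5, "dog": 6, "ran": 7}
print(build_features(["the", "dog"], ["ran"], 8, vocab))
```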
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","input_type_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","seq_length","assert","len","(","input_mask",")","==","seq_length","assert","len","(","input_type_ids",")","==","seq_length","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","example",".","unique_id",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"input_type_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_type_ids","]",")",")","features",".","append","(","InputFeatures","(","unique_id","=","example",".","unique_id",",","tokens","=","tokens",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","input_type_ids","=","input_type_ids",")",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/extract_features.py#L210-L299"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/extract_features.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. 
This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/extract_features.py#L302-L316"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/extract_features.py","language":"python","identifier":"read_examples","parameters":"(input_file)","argument_list":"","return_statement":"return examples","docstring":"Read a list of `InputExample`s from an input file.","docstring_summary":"Read a list of `InputExample`s from an input file.","docstring_tokens":["Read","a","list","of","InputExample","s","from","an","input","file","."],"function":"def read_examples(input_file):\n \"\"\"Read a list of `InputExample`s from an input file.\"\"\"\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(\n InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))\n unique_id += 1\n return examples","function_tokens":["def","read_examples","(","input_file",")",":","examples","=","[","]","unique_id","=","0","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","text_a","=","None","text_b","=","None","m","=","re",".","match","(","r\"^(.*) \\|\\|\\| (.*)$\"",",","line",")","if","m","is","None",":","text_a","=","line","else",":","text_a","=","m",".","group","(","1",")","text_b","=","m",".","group","(","2",")","examples",".","append","(","InputExample","(","unique_id","=","unique_id",",","text_a","=","text_a",",","text_b","=","text_b",")",")","unique_id","+=","1","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/extract_features.py#L319-L340"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"create_model","parameters":"(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle):\n \"\"\"Creates a classification model.\"\"\"\n tags = set()\n if is_training:\n tags.add(\"train\")\n bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True)\n bert_inputs = 
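`read_examples` splits each input line on a literal ` ||| ` separator to recover `text_a` and `text_b`; single-sequence lines simply fail the match and become `text_a` alone. A quick demo of that regex:

```python
# Demo of the " ||| " line convention read_examples parses above.
import re

for line in ["first sentence ||| second sentence", "just one sentence"]:
    m = re.match(r"^(.*) \|\|\| (.*)$", line)
    if m is None:
        text_a, text_b = line, None
    else:
        text_a, text_b = m.group(1), m.group(2)
    print(repr(text_a), repr(text_b))
```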
dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use\n # bert_outputs[\"sequence_output\"] instead.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)","function_tokens":["def","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","bert_hub_module_handle",")",":","tags","=","set","(",")","if","is_training",":","tags",".","add","(","\"train\"",")","bert_module","=","hub",".","Module","(","bert_hub_module_handle",",","tags","=","tags",",","trainable","=","True",")","bert_inputs","=","dict","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",")","bert_outputs","=","bert_module","(","inputs","=","bert_inputs",",","signature","=","\"tokens\"",",","as_dict","=","True",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use","# bert_outputs[\"sequence_output\"] instead.","output_layer","=","bert_outputs","[","\"pooled_output\"","]","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 
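`create_model` in run_classifier_with_tfhub.py puts a single dense softmax head on the module's `pooled_output`. A numpy sketch of the same math, from logits through per-example cross-entropy (toy shapes; the 0.02 scale mirrors the truncated-normal initializer above):

```python
# Numpy sketch of the classifier head: logits = x @ W.T + b, then
# per-example loss = -sum(onehot * log_softmax(logits)).
import numpy as np

batch, hidden, num_labels = 2, 4, 3
rng = np.random.RandomState(0)
pooled = rng.randn(batch, hidden)          # stand-in for pooled_output
W = rng.randn(num_labels, hidden) * 0.02   # output_weights
b = np.zeros(num_labels)                   # output_bias

logits = pooled @ W.T + b
log_probs = logits - np.log(np.exp(logits).sum(-1, keepdims=True))
labels = np.array([0, 2])
one_hot = np.eye(num_labels)[labels]
per_example_loss = -(one_hot * log_probs).sum(-1)
print(per_example_loss.mean())             # the scalar training loss
```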
dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier_with_tfhub.py#L37-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"model_fn_builder","parameters":"(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,\n bert_hub_module_handle)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions={\"probabilities\": probabilities})\n else:\n raise ValueError(\n \"Only TRAIN, EVAL and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return 
model_fn","function_tokens":["def","model_fn_builder","(","num_labels",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","bert_hub_module_handle",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","bert_hub_module_handle",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","label_ids",",","predictions",")","loss","=","tf",".","metrics",".","mean","(","per_example_loss",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",")","else",":","raise","ValueError","(","\"Only TRAIN, EVAL and PREDICT modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier_with_tfhub.py#L87-L143"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert_wwm_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"create_tokenizer_from_hub_module","parameters":"(bert_hub_module_handle)","argument_list":"","return_statement":"return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","docstring":"Get the vocab file and casing info from the Hub module.","docstring_summary":"Get the vocab file and casing info from the Hub module.","docstring_tokens":["Get","the","vocab","file","and","casing","info","from","the","Hub","module","."],"function":"def 
create_tokenizer_from_hub_module(bert_hub_module_handle):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_hub_module_handle)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","function_tokens":["def","create_tokenizer_from_hub_module","(","bert_hub_module_handle",")",":","with","tf",".","Graph","(",")",".","as_default","(",")",":","bert_module","=","hub",".","Module","(","bert_hub_module_handle",")","tokenization_info","=","bert_module","(","signature","=","\"tokenization_info\"",",","as_dict","=","True",")","with","tf",".","Session","(",")","as","sess",":","vocab_file",",","do_lower_case","=","sess",".","run","(","[","tokenization_info","[","\"vocab_file\"","]",",","tokenization_info","[","\"do_lower_case\"","]","]",")","return","tokenization",".","FullTokenizer","(","vocab_file","=","vocab_file",",","do_lower_case","=","do_lower_case",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert_wwm_ext\/run_classifier_with_tfhub.py#L146-L155"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_pretraining.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.trainable_variables()\n\n 
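A hedged usage sketch for `create_tokenizer_from_hub_module` (this relies on the TF1-era `hub.Module` API; the handle below is an example URL chosen for illustration, not one pinned anywhere in this repo):

```python
# Assumed usage only: the handle is illustrative and requires TF1 + tensorflow_hub.
BERT_HUB_HANDLE = "https://tfhub.dev/google/bert_chinese_L-12_H-768_A-12/1"
# tokenizer = create_tokenizer_from_hub_module(BERT_HUB_HANDLE)
# print(tokenizer.tokenize("some input text"))
```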
initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features 
***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","masked_lm_positions","=","features","[","\"masked_lm_positions\"","]","masked_lm_ids","=","features","[","\"masked_lm_ids\"","]","masked_lm_weights","=","features","[","\"masked_lm_weights\"","]","next_sentence_labels","=","features","[","\"next_sentence_labels\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","(","masked_lm_loss",",","masked_lm_example_loss",",","masked_lm_log_probs",")","=","get_masked_lm_output","(","bert_config",",","model",".","get_sequence_output","(",")",",","model",".","get_embedding_table","(",")",",","masked_lm_positions",",","masked_lm_ids",",","masked_lm_weights",")","(","next_sentence_loss",",","next_sentence_example_loss",",","next_sentence_log_probs",")","=","get_next_sentence_output","(","bert_config",",","model",".","get_pooled_output","(",")",",","next_sentence_labels",")","total_loss","=","masked_lm_loss","+","next_sentence_loss","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels",")",":","\"\"\"Computes the loss and accuracy of the 
model.\"\"\"","masked_lm_log_probs","=","tf",".","reshape","(","masked_lm_log_probs",",","[","-","1",",","masked_lm_log_probs",".","shape","[","-","1","]","]",")","masked_lm_predictions","=","tf",".","argmax","(","masked_lm_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","masked_lm_example_loss","=","tf",".","reshape","(","masked_lm_example_loss",",","[","-","1","]",")","masked_lm_ids","=","tf",".","reshape","(","masked_lm_ids",",","[","-","1","]",")","masked_lm_weights","=","tf",".","reshape","(","masked_lm_weights",",","[","-","1","]",")","masked_lm_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","masked_lm_ids",",","predictions","=","masked_lm_predictions",",","weights","=","masked_lm_weights",")","masked_lm_mean_loss","=","tf",".","metrics",".","mean","(","values","=","masked_lm_example_loss",",","weights","=","masked_lm_weights",")","next_sentence_log_probs","=","tf",".","reshape","(","next_sentence_log_probs",",","[","-","1",",","next_sentence_log_probs",".","shape","[","-","1","]","]",")","next_sentence_predictions","=","tf",".","argmax","(","next_sentence_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","next_sentence_labels","=","tf",".","reshape","(","next_sentence_labels",",","[","-","1","]",")","next_sentence_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","next_sentence_labels",",","predictions","=","next_sentence_predictions",")","next_sentence_mean_loss","=","tf",".","metrics",".","mean","(","values","=","next_sentence_example_loss",")","return","{","\"masked_lm_accuracy\"",":","masked_lm_accuracy",",","\"masked_lm_loss\"",":","masked_lm_mean_loss",",","\"next_sentence_accuracy\"",":","next_sentence_accuracy",",","\"next_sentence_loss\"",":","next_sentence_mean_loss",",","}","eval_metrics","=","(","metric_fn",",","[","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and EVAL modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_pretraining.py#L109-L237"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_pretraining.py","language":"python","identifier":"get_masked_lm_output","parameters":"(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights)","argument_list":"","return_statement":"return (loss, per_example_loss, log_probs)","docstring":"Get loss and log probs for the masked LM.","docstring_summary":"Get loss and log probs for the masked LM.","docstring_tokens":["Get","loss","and","log","probs","for","the","masked","LM","."],"function":"def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls\/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with 
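Inside the pretraining `metric_fn`, masked-LM accuracy flattens the prediction slots, takes an argmax over the vocabulary axis, and weights out padded slots. A numpy sketch of that computation with toy values:

```python
# Numpy sketch of metric_fn's weighted masked-LM accuracy; shapes and
# label ids are toy values.
import numpy as np

vocab = 6
log_probs = np.log(np.full((2, 3, vocab), 1.0 / vocab))  # uniform toy output
log_probs[0, 0, 4] = 0.0                                 # force one argmax = 4
preds = log_probs.reshape(-1, vocab).argmax(-1)          # flatten, then argmax
ids = np.array([[4, 1, 0], [2, 0, 0]]).reshape(-1)
weights = np.array([[1., 1., 0.], [1., 0., 0.]]).reshape(-1)  # 0 = padding slot
accuracy = (weights * (preds == ids)).sum() / weights.sum()
print(accuracy)  # 1 correct out of 3 real predictions -> 0.333...
```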
tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator \/ denominator\n\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_masked_lm_output","(","bert_config",",","input_tensor",",","output_weights",",","positions",",","label_ids",",","label_weights",")",":","input_tensor","=","gather_indexes","(","input_tensor",",","positions",")","with","tf",".","variable_scope","(","\"cls\/predictions\"",")",":","# We apply one more non-linear transformation before the output layer.","# This matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"transform\"",")",":","input_tensor","=","tf",".","layers",".","dense","(","input_tensor",",","units","=","bert_config",".","hidden_size",",","activation","=","modeling",".","get_activation","(","bert_config",".","hidden_act",")",",","kernel_initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","input_tensor","=","modeling",".","layer_norm","(","input_tensor",")","# The output weights are the same as the input embeddings, but there is","# an output-only bias for each token.","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","bert_config",".","vocab_size","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","label_ids","=","tf",".","reshape","(","label_ids",",","[","-","1","]",")","label_weights","=","tf",".","reshape","(","label_weights",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","label_ids",",","depth","=","bert_config",".","vocab_size",",","dtype","=","tf",".","float32",")","# The `positions` tensor might be zero-padded (if the sequence is too","# short to have the maximum number of predictions). 
The `label_weights`","# tensor has a value of 1.0 for every real prediction and 0.0 for the","# padding predictions.","per_example_loss","=","-","tf",".","reduce_sum","(","log_probs","*","one_hot_labels",",","axis","=","[","-","1","]",")","numerator","=","tf",".","reduce_sum","(","label_weights","*","per_example_loss",")","denominator","=","tf",".","reduce_sum","(","label_weights",")","+","1e-5","loss","=","numerator","\/","denominator","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_pretraining.py#L240-L282"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_pretraining.py","language":"python","identifier":"get_next_sentence_output","parameters":"(bert_config, input_tensor, labels)","argument_list":"","return_statement":"","docstring":"Get loss and log probs for the next sentence prediction.","docstring_summary":"Get loss and log probs for the next sentence prediction.","docstring_tokens":["Get","loss","and","log","probs","for","the","next","sentence","prediction","."],"function":"def get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls\/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_next_sentence_output","(","bert_config",",","input_tensor",",","labels",")",":","# Simple binary classification. Note that 0 is \"next sentence\" and 1 is","# \"random sentence\". 
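`get_masked_lm_output` averages the per-position losses with `label_weights`, so zero-weighted padding slots drop out of the mean; the `1e-5` in the denominator guards against an all-padding batch. In numpy:

```python
# Numpy sketch of the weighted mean at the end of get_masked_lm_output.
import numpy as np

per_example_loss = np.array([2.0, 1.0, 3.0, 0.7])
label_weights = np.array([1.0, 1.0, 0.0, 0.0])  # last two slots are padding
numerator = (label_weights * per_example_loss).sum()
denominator = label_weights.sum() + 1e-5
print(numerator / denominator)  # ~1.5, the mean over the real predictions
```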
This weight matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"cls\/seq_relationship\"",")",":","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","shape","=","[","2",",","bert_config",".","hidden_size","]",",","initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","labels","=","tf",".","reshape","(","labels",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","2",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_pretraining.py#L285-L305"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_pretraining.py","language":"python","identifier":"gather_indexes","parameters":"(sequence_tensor, positions)","argument_list":"","return_statement":"return output_tensor","docstring":"Gathers the vectors at the specific positions over a minibatch.","docstring_summary":"Gathers the vectors at the specific positions over a minibatch.","docstring_tokens":["Gathers","the","vectors","at","the","specific","positions","over","a","minibatch","."],"function":"def gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return 
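The next-sentence record quoted above is a plain two-way classifier over the pooled [CLS] vector, with label 0 for "next sentence" and 1 for "random sentence". The sketch below restates it in NumPy under assumed toy shapes; `log_softmax` is hand-rolled here and all weights are dummies.

import numpy as np

def log_softmax(x):
    x = x - x.max(axis=-1, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=-1, keepdims=True))

hidden = np.random.randn(8, 16)            # pooled output: [batch, hidden]
W = np.random.randn(2, 16) * 0.02          # [2, hidden], like output_weights
b = np.zeros(2)                            # like output_bias
log_probs = log_softmax(hidden @ W.T + b)  # [batch, 2]
labels = np.zeros(8, dtype=int)            # all "next sentence"
per_example_loss = -log_probs[np.arange(8), labels]
print(per_example_loss.mean())             # the scalar NSP loss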
output_tensor","function_tokens":["def","gather_indexes","(","sequence_tensor",",","positions",")",":","sequence_shape","=","modeling",".","get_shape_list","(","sequence_tensor",",","expected_rank","=","3",")","batch_size","=","sequence_shape","[","0","]","seq_length","=","sequence_shape","[","1","]","width","=","sequence_shape","[","2","]","flat_offsets","=","tf",".","reshape","(","tf",".","range","(","0",",","batch_size",",","dtype","=","tf",".","int32",")","*","seq_length",",","[","-","1",",","1","]",")","flat_positions","=","tf",".","reshape","(","positions","+","flat_offsets",",","[","-","1","]",")","flat_sequence_tensor","=","tf",".","reshape","(","sequence_tensor",",","[","batch_size","*","seq_length",",","width","]",")","output_tensor","=","tf",".","gather","(","flat_sequence_tensor",",","flat_positions",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_pretraining.py#L308-L321"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_pretraining.py","language":"python","identifier":"input_fn_builder","parameters":"(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
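`gather_indexes` relies on a flatten-and-offset trick: adding batch_index * seq_length to each position lets a single gather over the flattened [batch * seq_length, width] matrix pick out the masked rows for the whole minibatch at once. A NumPy illustration with made-up shapes:

import numpy as np

batch_size, seq_length, width = 2, 5, 3
sequence = np.arange(batch_size * seq_length * width).reshape(
    batch_size, seq_length, width)
positions = np.array([[0, 4], [1, 2]])  # two masked positions per example

flat_offsets = (np.arange(batch_size) * seq_length).reshape(-1, 1)
flat_positions = (positions + flat_offsets).reshape(-1)   # [0, 4, 6, 7]
flat_sequence = sequence.reshape(batch_size * seq_length, width)
gathered = flat_sequence[flat_positions]  # [batch_size * num_positions, width]
assert (gathered[2] == sequence[1, 1]).all()  # offset 6 is example 1, position 1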
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_files",",","max_seq_length",",","max_predictions_per_seq",",","is_training",",","num_cpu_threads","=","4",")",":","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"masked_lm_positions\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_ids\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_weights\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","float32",")",",","\"next_sentence_labels\"",":","tf",".","FixedLenFeature","(","[","1","]",",","tf",".","int64",")",",","}","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","if","is_training",":","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","tf",".","constant","(","input_files",")",")","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","len","(","input_files",")",")","# `cycle_length` is the number of parallel files that get read.","cycle_length","=","min","(","num_cpu_threads",",","len","(","input_files",")",")","# `sloppy` mode means that the interleaving is not exact. This adds","# even more randomness to the training pipeline.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","parallel_interleave","(","tf",".","data",".","TFRecordDataset",",","sloppy","=","is_training",",","cycle_length","=","cycle_length",")",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","else",":","d","=","tf",".","data",".","TFRecordDataset","(","input_files",")","# Since we evaluate for a fixed number of steps we don't want to encounter","# out-of-range exceptions.","d","=","d",".","repeat","(",")","# We must `drop_remainder` on training because the TPU requires fixed","# size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU","# and we *don't* want to drop the remainder, otherwise we wont cover","# every sample.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","num_parallel_batches","=","num_cpu_threads",",","drop_remainder","=","True",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_pretraining.py#L324-L388"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_pretraining.py","language":"python","identifier":"_decode_record","parameters":"(record, name_to_features)","argument_list":"","return_statement":"return example","docstring":"Decodes a record to a TensorFlow example.","docstring_summary":"Decodes a record to a TensorFlow example.","docstring_tokens":["Decodes","a","record","to","a","TensorFlow","example","."],"function":"def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example","function_tokens":["def","_decode_record","(","record",",","name_to_features",")",":","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_pretraining.py#L391-L403"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"read_squad_examples","parameters":"(input_file, is_training)","argument_list":"","return_statement":"return examples","docstring":"Read a SQuAD json file into a list of SquadExample.","docstring_summary":"Read a SQuAD json file into a list of SquadExample.","docstring_tokens":["Read","a","SQuAD","json","file","into","a","list","of","SquadExample","."],"function":"def read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in 
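The pretraining `input_fn` above is written against TF 1.x `tf.contrib.data`, which no longer exists. As a hedged sketch only, roughly the same pipeline in post-contrib TF 2.x style could look like the following, where `deterministic=False` plays the role of the old `sloppy=True` interleave and `drop_remainder=True` keeps shapes static as TPUs require; `make_dataset` is an invented name.

import tensorflow as tf

def make_dataset(input_files, name_to_features, batch_size, is_training,
                 num_cpu_threads=4):
    d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
    if is_training:
        # Lots of parallel reading and shuffling for training.
        d = d.repeat().shuffle(buffer_size=len(input_files))
        d = d.interleave(tf.data.TFRecordDataset,
                         cycle_length=min(num_cpu_threads, len(input_files)),
                         num_parallel_calls=tf.data.AUTOTUNE,
                         deterministic=False)
        d = d.shuffle(buffer_size=100)
    else:
        # Repeat so a fixed number of eval steps never hits out-of-range.
        d = tf.data.TFRecordDataset(input_files).repeat()
    return d.map(
        lambda rec: tf.io.parse_single_example(rec, name_to_features),
        num_parallel_calls=tf.data.AUTOTUNE).batch(
            batch_size, drop_remainder=True)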
paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples","function_tokens":["def","read_squad_examples","(","input_file",",","is_training",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","reader",":","input_data","=","json",".","load","(","reader",")","[","\"data\"","]","def","is_whitespace","(","c",")",":","if","c","==","\" \"","or","c","==","\"\\t\"","or","c","==","\"\\r\"","or","c","==","\"\\n\"","or","ord","(","c",")","==","0x202F",":","return","True","return","False","examples","=","[","]","for","entry","in","input_data",":","for","paragraph","in","entry","[","\"paragraphs\"","]",":","paragraph_text","=","paragraph","[","\"context\"","]","doc_tokens","=","[","]","char_to_word_offset","=","[","]","prev_is_whitespace","=","True","for","c","in","paragraph_text",":","if","is_whitespace","(","c",")",":","prev_is_whitespace","=","True","else",":","if","prev_is_whitespace",":","doc_tokens",".","append","(","c",")","else",":","doc_tokens","[","-","1","]","+=","c","prev_is_whitespace","=","False","char_to_word_offset",".","append","(","len","(","doc_tokens",")","-","1",")","for","qa","in","paragraph","[","\"qas\"","]",":","qas_id","=","qa","[","\"id\"","]","question_text","=","qa","[","\"question\"","]","start_position","=","None","end_position","=","None","orig_answer_text","=","None","is_impossible","=","False","if","is_training",":","if","FLAGS",".","version_2_with_negative",":","is_impossible","=","qa","[","\"is_impossible\"","]","if","(","len","(","qa","[","\"answers\"","]",")","!=","1",")","and","(","not","is_impossible",")",":","raise","ValueError","(","\"For training, each question should have exactly 1 
answer.\"",")","if","not","is_impossible",":","answer","=","qa","[","\"answers\"","]","[","0","]","orig_answer_text","=","answer","[","\"text\"","]","answer_offset","=","answer","[","\"answer_start\"","]","answer_length","=","len","(","orig_answer_text",")","start_position","=","char_to_word_offset","[","answer_offset","]","end_position","=","char_to_word_offset","[","answer_offset","+","answer_length","-","1","]","# Only add answers where the text can be exactly recovered from the","# document. If this CAN'T happen it's likely due to weird Unicode","# stuff so we will just skip the example.","#","# Note that this means for training mode, every example is NOT","# guaranteed to be preserved.","actual_text","=","\" \"",".","join","(","doc_tokens","[","start_position",":","(","end_position","+","1",")","]",")","cleaned_answer_text","=","\" \"",".","join","(","tokenization",".","whitespace_tokenize","(","orig_answer_text",")",")","if","actual_text",".","find","(","cleaned_answer_text",")","==","-","1",":","tf",".","logging",".","warning","(","\"Could not find answer: '%s' vs. '%s'\"",",","actual_text",",","cleaned_answer_text",")","continue","else",":","start_position","=","-","1","end_position","=","-","1","orig_answer_text","=","\"\"","example","=","SquadExample","(","qas_id","=","qas_id",",","question_text","=","question_text",",","doc_tokens","=","doc_tokens",",","orig_answer_text","=","orig_answer_text",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","is_impossible",")","examples",".","append","(","example",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L227-L306"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn)","argument_list":"","return_statement":"","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, 
tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 
1","function_tokens":["def","convert_examples_to_features","(","examples",",","tokenizer",",","max_seq_length",",","doc_stride",",","max_query_length",",","is_training",",","output_fn",")",":","unique_id","=","1000000000","for","(","example_index",",","example",")","in","enumerate","(","examples",")",":","query_tokens","=","tokenizer",".","tokenize","(","example",".","question_text",")","if","len","(","query_tokens",")",">","max_query_length",":","query_tokens","=","query_tokens","[","0",":","max_query_length","]","tok_to_orig_index","=","[","]","orig_to_tok_index","=","[","]","all_doc_tokens","=","[","]","for","(","i",",","token",")","in","enumerate","(","example",".","doc_tokens",")",":","orig_to_tok_index",".","append","(","len","(","all_doc_tokens",")",")","sub_tokens","=","tokenizer",".","tokenize","(","token",")","for","sub_token","in","sub_tokens",":","tok_to_orig_index",".","append","(","i",")","all_doc_tokens",".","append","(","sub_token",")","tok_start_position","=","None","tok_end_position","=","None","if","is_training","and","example",".","is_impossible",":","tok_start_position","=","-","1","tok_end_position","=","-","1","if","is_training","and","not","example",".","is_impossible",":","tok_start_position","=","orig_to_tok_index","[","example",".","start_position","]","if","example",".","end_position","<","len","(","example",".","doc_tokens",")","-","1",":","tok_end_position","=","orig_to_tok_index","[","example",".","end_position","+","1","]","-","1","else",":","tok_end_position","=","len","(","all_doc_tokens",")","-","1","(","tok_start_position",",","tok_end_position",")","=","_improve_answer_span","(","all_doc_tokens",",","tok_start_position",",","tok_end_position",",","tokenizer",",","example",".","orig_answer_text",")","# The -3 accounts for [CLS], [SEP] and [SEP]","max_tokens_for_doc","=","max_seq_length","-","len","(","query_tokens",")","-","3","# We can have documents that are longer than the maximum sequence length.","# To deal with this we do a sliding window approach, where we take chunks","# of the up to our max length with a stride of `doc_stride`.","_DocSpan","=","collections",".","namedtuple","(","# pylint: 
disable=invalid-name","\"DocSpan\"",",","[","\"start\"",",","\"length\"","]",")","doc_spans","=","[","]","start_offset","=","0","while","start_offset","<","len","(","all_doc_tokens",")",":","length","=","len","(","all_doc_tokens",")","-","start_offset","if","length",">","max_tokens_for_doc",":","length","=","max_tokens_for_doc","doc_spans",".","append","(","_DocSpan","(","start","=","start_offset",",","length","=","length",")",")","if","start_offset","+","length","==","len","(","all_doc_tokens",")",":","break","start_offset","+=","min","(","length",",","doc_stride",")","for","(","doc_span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","tokens","=","[","]","token_to_orig_map","=","{","}","token_is_max_context","=","{","}","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","query_tokens",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","i","in","range","(","doc_span",".","length",")",":","split_token_index","=","doc_span",".","start","+","i","token_to_orig_map","[","len","(","tokens",")","]","=","tok_to_orig_index","[","split_token_index","]","is_max_context","=","_check_is_max_context","(","doc_spans",",","doc_span_index",",","split_token_index",")","token_is_max_context","[","len","(","tokens",")","]","=","is_max_context","tokens",".","append","(","all_doc_tokens","[","split_token_index","]",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","start_position","=","None","end_position","=","None","if","is_training","and","not","example",".","is_impossible",":","# For training, if our document chunk does not contain an annotation","# we throw it out, since there is nothing to predict.","doc_start","=","doc_span",".","start","doc_end","=","doc_span",".","start","+","doc_span",".","length","-","1","out_of_span","=","False","if","not","(","tok_start_position",">=","doc_start","and","tok_end_position","<=","doc_end",")",":","out_of_span","=","True","if","out_of_span",":","start_position","=","0","end_position","=","0","else",":","doc_offset","=","len","(","query_tokens",")","+","2","start_position","=","tok_start_position","-","doc_start","+","doc_offset","end_position","=","tok_end_position","-","doc_start","+","doc_offset","if","is_training","and","example",".","is_impossible",":","start_position","=","0","end_position","=","0","if","example_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","unique_id",")",")","tf",".","logging",".","info","(","\"example_index: %s\"","%","(","example_index",")",")","tf",".","logging",".","info","(","\"doc_span_index: %s\"","%","(","doc_span_index",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"token_to_orig_map: %s\"","%","\" \"",".","join","(","[","\"%d:%d\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_to_orig_map",")","]",")",")","tf",".","logging",".","info","(","\"token_is_max_context: %s\"","%","\" \"",".","join","(","[","\"%d:%s\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_is_max_context",")","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","if","is_training","and","example",".","is_impossible",":","tf",".","logging",".","info","(","\"impossible example\"",")","if","is_training","and","not","example",".","is_impossible",":","answer_text","=","\" \"",".","join","(","tokens","[","start_position",":","(","end_position","+","1",")","]",")","tf",".","logging",".","info","(","\"start_position: %d\"","%","(","start_position",")",")","tf",".","logging",".","info","(","\"end_position: %d\"","%","(","end_position",")",")","tf",".","logging",".","info","(","\"answer: 
%s\"","%","(","tokenization",".","printable_text","(","answer_text",")",")",")","feature","=","InputFeatures","(","unique_id","=","unique_id",",","example_index","=","example_index",",","doc_span_index","=","doc_span_index",",","tokens","=","tokens",",","token_to_orig_map","=","token_to_orig_map",",","token_is_max_context","=","token_is_max_context",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","example",".","is_impossible",")","# Run callback","output_fn","(","feature",")","unique_id","+=","1"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L309-L473"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"_improve_answer_span","parameters":"(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text)","argument_list":"","return_statement":"return (input_start, input_end)","docstring":"Returns tokenized answer spans that better match the annotated answer.","docstring_summary":"Returns tokenized answer spans that better match the annotated answer.","docstring_tokens":["Returns","tokenized","answer","spans","that","better","match","the","annotated","answer","."],"function":"def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)","function_tokens":["def","_improve_answer_span","(","doc_tokens",",","input_start",",","input_end",",","tokenizer",",","orig_answer_text",")",":","# The SQuAD annotations are character based. We first project them to","# whitespace-tokenized words. But then after WordPiece tokenization, we can","# often find a \"better match\". For example:","#","# Question: What year was John Smith born?","# Context: The leader was John Smith (1895-1943).","# Answer: 1895","#","# The original whitespace-tokenized answer will be \"(1895-1943).\". 
However","# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match","# the exact answer, 1895.","#","# However, this is not always possible. Consider the following:","#","# Question: What country is the top exporter of electornics?","# Context: The Japanese electronics industry is the lagest in the world.","# Answer: Japan","#","# In this case, the annotator chose \"Japan\" as a character sub-span of","# the word \"Japanese\". Since our WordPiece tokenizer does not split","# \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare","# in SQuAD, but does happen.","tok_answer_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_answer_text",")",")","for","new_start","in","range","(","input_start",",","input_end","+","1",")",":","for","new_end","in","range","(","input_end",",","new_start","-","1",",","-","1",")",":","text_span","=","\" \"",".","join","(","doc_tokens","[","new_start",":","(","new_end","+","1",")","]",")","if","text_span","==","tok_answer_text",":","return","(","new_start",",","new_end",")","return","(","input_start",",","input_end",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L476-L510"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. 
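`_improve_answer_span` simply scans every sub-span of the rough word-level answer span for one whose joined tokens equal the re-tokenized answer text, recovering "1895" inside "( 1895 - 1943 ) ." in the docstring's own example. A self-contained miniature:

def improve_answer_span(doc_tokens, input_start, input_end, tok_answer_text):
    for new_start in range(input_start, input_end + 1):
        for new_end in range(input_end, new_start - 1, -1):
            if " ".join(doc_tokens[new_start:new_end + 1]) == tok_answer_text:
                return new_start, new_end
    return input_start, input_end  # fall back to the original rough span

doc = ["John", "Smith", "(", "1895", "-", "1943", ")", "."]
print(improve_answer_span(doc, 2, 7, "1895"))  # -> (3, 3)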
E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L513-L547"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return (start_logits, end_logits)","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls\/squad\/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls\/squad\/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, 
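The "max context" rule quoted above scores a token's membership in a span as min(left context, right context) + 0.01 * span length, and keeps the token only in its best-scoring span. Computing the score directly for the docstring's example (token positions assumed from the example sentence, where 'bought' is token 7):

def max_context_score(span_start, span_length, position):
    end = span_start + span_length - 1
    if not span_start <= position <= end:
        return None  # token not in this span
    left, right = position - span_start, end - position
    return min(left, right) + 0.01 * span_length

position = 7                              # the token 'bought'
print(max_context_score(3, 5, position))  # span B (tokens 3-7): 0 + 0.05 = 0.05
print(max_context_score(6, 5, position))  # span C (tokens 6-10): 1 + 0.05 = 1.05, wins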
end_logits)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","final_hidden","=","model",".","get_sequence_output","(",")","final_hidden_shape","=","modeling",".","get_shape_list","(","final_hidden",",","expected_rank","=","3",")","batch_size","=","final_hidden_shape","[","0","]","seq_length","=","final_hidden_shape","[","1","]","hidden_size","=","final_hidden_shape","[","2","]","output_weights","=","tf",".","get_variable","(","\"cls\/squad\/output_weights\"",",","[","2",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"cls\/squad\/output_bias\"",",","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","final_hidden_matrix","=","tf",".","reshape","(","final_hidden",",","[","batch_size","*","seq_length",",","hidden_size","]",")","logits","=","tf",".","matmul","(","final_hidden_matrix",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","logits","=","tf",".","reshape","(","logits",",","[","batch_size",",","seq_length",",","2","]",")","logits","=","tf",".","transpose","(","logits",",","[","2",",","0",",","1","]",")","unstacked_logits","=","tf",".","unstack","(","logits",",","axis","=","0",")","(","start_logits",",","end_logits",")","=","(","unstacked_logits","[","0","]",",","unstacked_logits","[","1","]",")","return","(","start_logits",",","end_logits",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L550-L587"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = 
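The SQuAD head in `create_model` is a single [2, hidden] projection applied at every token, followed by a reshape/transpose/unstack that separates start and end logits. In NumPy, with dummy shapes and random weights standing in for the trained variables:

import numpy as np

batch, seq_len, hidden = 2, 6, 8
final_hidden = np.random.randn(batch, seq_len, hidden)
W = np.random.randn(2, hidden) * 0.02  # like cls/squad/output_weights
b = np.zeros(2)                        # like cls/squad/output_bias

logits = final_hidden.reshape(batch * seq_len, hidden) @ W.T + b
logits = logits.reshape(batch, seq_len, 2).transpose(2, 0, 1)  # [2, batch, seq]
start_logits, end_logits = logits[0], logits[1]
print(start_logits.shape, end_logits.shape)  # (2, 6) (2, 6)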
tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) \/ 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","start_logits",",","end_logits",")","=","create_model","(","bert_config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","seq_length","=","modeling",".","get_shape_list","(","input_ids",")","[","1","]","def","compute_loss","(","logits",",","positions",")",":","one_hot_positions","=","tf",".","one_hot","(","positions",",","depth","=","seq_length",",","dtype","=","tf",".","float32",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","loss","=","-","tf",".","reduce_mean","(","tf",".","reduce_sum","(","one_hot_positions","*","log_probs",",","axis","=","-","1",")",")","return","loss","start_positions","=","features","[","\"start_positions\"","]","end_positions","=","features","[","\"end_positions\"","]","start_loss","=","compute_loss","(","start_logits",",","start_positions",")","end_loss","=","compute_loss","(","end_logits",",","end_positions",")","total_loss","=","(","start_loss","+","end_loss",")","\/","2.0","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","predictions","=","{","\"unique_ids\"",":","unique_ids",",","\"start_logits\"",":","start_logits",",","\"end_logits\"",":","end_logits",",","}","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and PREDICT modes are supported: 
%s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L590-L684"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"input_fn_builder","parameters":"(input_file, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"unique_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","}","if","is_training",":","name_to_features","[","\"start_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","name_to_features","[","\"end_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to 
int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L687-L734"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"write_predictions","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, 
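The `_decode_record` helper quoted above leans on the deprecated `tf.to_int32` and TF 1.x parsing names. A hedged restatement of the same logic with current ops only (the behavior is unchanged: tf.train.Example stores integers as int64, but TPUs want int32 inputs):

import tensorflow as tf

def decode_record(record, name_to_features):
    example = tf.io.parse_single_example(record, name_to_features)
    # Cast every int64 feature down to int32 for TPU compatibility.
    for name in list(example.keys()):
        if example[name].dtype == tf.int64:
            example[name] = tf.cast(example[name], tf.int32)
    return example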
n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
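The nested start/end loop above is a filtered cross product over the top-n logit positions, scored by start_logit + end_logit. A framework-free sketch of the same candidate generation with toy logits (all values hypothetical):

import collections

start_logits = [0.1, 2.0, 0.3, 1.5, 0.2, 0.0]  # toy values
end_logits   = [0.0, 0.4, 2.2, 0.1, 1.8, 0.3]

def best_indexes(logits, n_best_size):
    # Same idea as _get_best_indexes: positions of the n largest logits.
    ranked = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
    return [i for i, _ in ranked[:n_best_size]]

Span = collections.namedtuple("Span", ["start", "end", "score"])
max_answer_length = 4

candidates = []
for s in best_indexes(start_logits, 3):
    for e in best_indexes(end_logits, 3):
        if e < s or (e - s + 1) > max_answer_length:
            continue  # drop inverted or over-long spans, as the real code does
        candidates.append(Span(s, e, start_logits[s] + end_logits[e]))

candidates.sort(key=lambda x: x.score, reverse=True)
print(candidates[0])  # Span(start=1, end=2, score=4.2)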
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")","function_tokens":["def","write_predictions","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",",","output_null_log_odds_file",")",":","tf",".","logging",".","info","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","tf",".","logging",".","info","(","\"Writing nbest to: %s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature",".","example_index","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","all_examples",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","score_null","=","1000000","# large and positive","min_null_feature_index","=","0","# the paragraph slice with min mull score","null_start_logit","=","0","# the start logit at the slice with min null score","null_end_logit","=","0","# the end logit at the slice with min null 
score","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature",".","unique_id","]","start_indexes","=","_get_best_indexes","(","result",".","start_logits",",","n_best_size",")","end_indexes","=","_get_best_indexes","(","result",".","end_logits",",","n_best_size",")","# if we could have irrelevant answers, get the min score of irrelevant","if","FLAGS",".","version_2_with_negative",":","feature_null_score","=","result",".","start_logits","[","0","]","+","result",".","end_logits","[","0","]","if","feature_null_score","<","score_null",":","score_null","=","feature_null_score","min_null_feature_index","=","feature_index","null_start_logit","=","result",".","start_logits","[","0","]","null_end_logit","=","result",".","end_logits","[","0","]","for","start_index","in","start_indexes",":","for","end_index","in","end_indexes",":","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature",".","tokens",")",":","continue","if","end_index",">=","len","(","feature",".","tokens",")",":","continue","if","start_index","not","in","feature",".","token_to_orig_map",":","continue","if","end_index","not","in","feature",".","token_to_orig_map",":","continue","if","not","feature",".","token_is_max_context",".","get","(","start_index",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","result",".","start_logits","[","start_index","]",",","end_logit","=","result",".","end_logits","[","end_index","]",")",")","if","FLAGS",".","version_2_with_negative",":","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","min_null_feature_index",",","start_index","=","0",",","end_index","=","0",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"","]",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature",".","tokens","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature",".","token_to_orig_map","[","pred",".","start_index","]","orig_doc_end","=","feature",".","token_to_orig_map","[","pred",".","end_index","]","orig_tokens","=","example",".","doc_tokens","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\" \"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean 
whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\" \"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",")",")","# if we didn't inlude the empty option in the n-best, inlcude it","if","FLAGS",".","version_2_with_negative",":","if","\"\"","not","in","seen_predictions",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","# In very rare edge cases we could have no valid predictions. So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","assert","len","(","nbest",")",">=","1","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","if","not","best_non_null_entry",":","if","entry",".","text",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","probs","[","i","]","output","[","\"start_logit\"","]","=","entry",".","start_logit","output","[","\"end_logit\"","]","=","entry",".","end_logit","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","if","not","FLAGS",".","version_2_with_negative",":","all_predictions","[","example",".","qas_id","]","=","nbest_json","[","0","]","[","\"text\"","]","else",":","# predict \"\" iff the null score - the score of best non-null > 
threshold","score_diff","=","score_null","-","best_non_null_entry",".","start_logit","-","(","best_non_null_entry",".","end_logit",")","scores_diff_json","[","example",".","qas_id","]","=","score_diff","if","score_diff",">","FLAGS",".","null_score_diff_threshold",":","all_predictions","[","example",".","qas_id","]","=","\"\"","else",":","all_predictions","[","example",".","qas_id","]","=","best_non_null_entry",".","text","all_nbest_json","[","example",".","qas_id","]","=","nbest_json","with","tf",".","gfile",".","GFile","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",")","+","\"\\n\"",")","with","tf",".","gfile",".","GFile","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",")","+","\"\\n\"",")","if","FLAGS",".","version_2_with_negative",":","with","tf",".","gfile",".","GFile","(","output_null_log_odds_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","scores_diff_json",",","indent","=","4",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L741-L924"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"get_final_text","parameters":"(pred_text, orig_text, do_lower_case)","argument_list":"","return_statement":"return output_text","docstring":"Project the tokenized prediction back to the original text.","docstring_summary":"Project the tokenized prediction back to the original text.","docstring_tokens":["Project","the","tokenized","prediction","back","to","the","original","text","."],"function":"def get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping\/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text","function_tokens":["def","get_final_text","(","pred_text",",","orig_text",",","do_lower_case",")",":","# When we created the data, we kept track of the alignment between original","# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So","# now `orig_text` contains the span of our original text corresponding to the","# span that we predicted.","#","# However, `orig_text` may contain extra characters that we don't want in","# our prediction.","#","# For example, let's say:","# pred_text = steve smith","# orig_text = Steve Smith's","#","# We don't want to return `orig_text` because it contains the extra \"'s\".","#","# We don't want to return `pred_text` because it's already been normalized","# (the SQuAD eval script also does punctuation stripping\/lower casing but","# our tokenizer does additional normalization like stripping accent","# characters).","#","# What we really want to return is \"Steve Smith\".","#","# Therefore, we have to apply a semi-complicated alignment heruistic between","# `pred_text` and `orig_text` to get a character-to-charcter alignment. This","# can fail in certain cases in which case we just return `orig_text`.","def","_strip_spaces","(","text",")",":","ns_chars","=","[","]","ns_to_s_map","=","collections",".","OrderedDict","(",")","for","(","i",",","c",")","in","enumerate","(","text",")",":","if","c","==","\" \"",":","continue","ns_to_s_map","[","len","(","ns_chars",")","]","=","i","ns_chars",".","append","(","c",")","ns_text","=","\"\"",".","join","(","ns_chars",")","return","(","ns_text",",","ns_to_s_map",")","# We first tokenize `orig_text`, strip whitespace from the result","# and `pred_text`, and check if they are the same length. 
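The length check that get_final_text performs after stripping spaces is the crux of the heuristic: if the space-free views of orig_text and the re-tokenized text line up character for character, positions can be projected between them. A standalone sketch of the _strip_spaces mapping:

import collections

def strip_spaces(text):
    # Mirror of the inner _strip_spaces helper: builds the stripped string and
    # a map from each stripped position back to its position in the original.
    ns_chars, ns_to_s_map = [], collections.OrderedDict()
    for i, c in enumerate(text):
        if c == " ":
            continue
        ns_to_s_map[len(ns_chars)] = i
        ns_chars.append(c)
    return "".join(ns_chars), ns_to_s_map

orig_text = "Steve Smith's"
tok_text = "steve smith ' s"  # what a lowercasing BasicTokenizer might produce

orig_ns, _ = strip_spaces(orig_text.lower())
tok_ns, tok_map = strip_spaces(tok_text)
print(orig_ns == tok_ns)  # True -> the character-to-character alignment is usable
print(tok_map[6])         # 7: stripped position 6 maps back to index 7 in tok_text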
If they are","# NOT the same length, the heuristic has failed. If they are the same","# length, we assume the characters are one-to-one aligned.","tokenizer","=","tokenization",".","BasicTokenizer","(","do_lower_case","=","do_lower_case",")","tok_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_text",")",")","start_position","=","tok_text",".","find","(","pred_text",")","if","start_position","==","-","1",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Unable to find text: '%s' in '%s'\"","%","(","pred_text",",","orig_text",")",")","return","orig_text","end_position","=","start_position","+","len","(","pred_text",")","-","1","(","orig_ns_text",",","orig_ns_to_s_map",")","=","_strip_spaces","(","orig_text",")","(","tok_ns_text",",","tok_ns_to_s_map",")","=","_strip_spaces","(","tok_text",")","if","len","(","orig_ns_text",")","!=","len","(","tok_ns_text",")",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Length not equal after stripping spaces: '%s' vs '%s'\"",",","orig_ns_text",",","tok_ns_text",")","return","orig_text","# We then project the characters in `pred_text` back to `orig_text` using","# the character-to-character alignment.","tok_s_to_ns_map","=","{","}","for","(","i",",","tok_index",")","in","six",".","iteritems","(","tok_ns_to_s_map",")",":","tok_s_to_ns_map","[","tok_index","]","=","i","orig_start_position","=","None","if","start_position","in","tok_s_to_ns_map",":","ns_start_position","=","tok_s_to_ns_map","[","start_position","]","if","ns_start_position","in","orig_ns_to_s_map",":","orig_start_position","=","orig_ns_to_s_map","[","ns_start_position","]","if","orig_start_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map start position\"",")","return","orig_text","orig_end_position","=","None","if","end_position","in","tok_s_to_ns_map",":","ns_end_position","=","tok_s_to_ns_map","[","end_position","]","if","ns_end_position","in","orig_ns_to_s_map",":","orig_end_position","=","orig_ns_to_s_map","[","ns_end_position","]","if","orig_end_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map end position\"",")","return","orig_text","output_text","=","orig_text","[","orig_start_position",":","(","orig_end_position","+","1",")","]","return","output_text"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L927-L1020"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return 
best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L1023-L1032"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L1035-L1055"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"validate_flags_or_throw","parameters":"(bert_config)","argument_list":"","return_statement":"","docstring":"Validate the input FLAGS or throw an exception.","docstring_summary":"Validate the input FLAGS or throw an exception.","docstring_tokens":["Validate","the","input","FLAGS","or","throw","an","exception","."],"function":"def validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to 
sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))","function_tokens":["def","validate_flags_or_throw","(","bert_config",")",":","tokenization",".","validate_case_matches_checkpoint","(","FLAGS",".","do_lower_case",",","FLAGS",".","init_checkpoint",")","if","not","FLAGS",".","do_train","and","not","FLAGS",".","do_predict",":","raise","ValueError","(","\"At least one of `do_train` or `do_predict` must be True.\"",")","if","FLAGS",".","do_train",":","if","not","FLAGS",".","train_file",":","raise","ValueError","(","\"If `do_train` is True, then `train_file` must be specified.\"",")","if","FLAGS",".","do_predict",":","if","not","FLAGS",".","predict_file",":","raise","ValueError","(","\"If `do_predict` is True, then `predict_file` must be specified.\"",")","if","FLAGS",".","max_seq_length",">","bert_config",".","max_position_embeddings",":","raise","ValueError","(","\"Cannot use sequence length %d because the BERT model \"","\"was only trained up to sequence length %d\"","%","(","FLAGS",".","max_seq_length",",","bert_config",".","max_position_embeddings",")",")","if","FLAGS",".","max_seq_length","<=","FLAGS",".","max_query_length","+","3",":","raise","ValueError","(","\"The max_seq_length (%d) must be greater than max_query_length \"","\"(%d) + 3\"","%","(","FLAGS",".","max_seq_length",",","FLAGS",".","max_query_length",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L1097-L1123"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_squad.py","language":"python","identifier":"FeatureWriter.process_feature","parameters":"(self, feature)","argument_list":"","return_statement":"","docstring":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_summary":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_tokens":["Write","a","InputFeature","to","the","TFRecordWriter","as","a","tf",".","train",".","Example","."],"function":"def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n 
self._writer.write(tf_example.SerializeToString())","function_tokens":["def","process_feature","(","self",",","feature",")",":","self",".","num_features","+=","1","def","create_int_feature","(","values",")",":","feature","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","feature","features","=","collections",".","OrderedDict","(",")","features","[","\"unique_ids\"","]","=","create_int_feature","(","[","feature",".","unique_id","]",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","if","self",".","is_training",":","features","[","\"start_positions\"","]","=","create_int_feature","(","[","feature",".","start_position","]",")","features","[","\"end_positions\"","]","=","create_int_feature","(","[","feature",".","end_position","]",")","impossible","=","0","if","feature",".","is_impossible",":","impossible","=","1","features","[","\"is_impossible\"","]","=","create_int_feature","(","[","impossible","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","self",".","_writer",".","write","(","tf_example",".","SerializeToString","(",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_squad.py#L1067-L1091"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"validate_case_matches_checkpoint","parameters":"(do_lower_case, init_checkpoint)","argument_list":"","return_statement":"","docstring":"Checks whether the casing config is consistent with the checkpoint name.","docstring_summary":"Checks whether the casing config is consistent with the checkpoint name.","docstring_tokens":["Checks","whether","the","casing","config","is","consistent","with","the","checkpoint","name","."],"function":"def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. 
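Building and serializing a tf.train.Example the way process_feature does takes only a few lines; this sketch uses made-up ids and the same TF 1.x API as the surrounding code:

import collections
import tensorflow as tf  # TF 1.x

def create_int_feature(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([1000000001])     # hypothetical id
features["input_ids"] = create_int_feature([101, 7592, 102])  # toy token ids
features["is_impossible"] = create_int_feature([0])           # bool stored as 0/1

tf_example = tf.train.Example(features=tf.train.Features(feature=features))
serialized = tf_example.SerializeToString()  # bytes, ready for a TFRecordWriter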
\"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-training. If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))","function_tokens":["def","validate_case_matches_checkpoint","(","do_lower_case",",","init_checkpoint",")",":","# The casing has to be passed in by the user and there is no explicit check","# as to whether it matches the checkpoint. The casing information probably","# should have been stored in the bert_config.json file, but it's not, so","# we have to heuristically detect it to validate.","if","not","init_checkpoint",":","return","m","=","re",".","match","(","\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\"",",","init_checkpoint",")","if","m","is","None",":","return","model_name","=","m",".","group","(","1",")","lower_models","=","[","\"uncased_L-24_H-1024_A-16\"",",","\"uncased_L-12_H-768_A-12\"",",","\"multilingual_L-12_H-768_A-12\"",",","\"chinese_L-12_H-768_A-12\"","]","cased_models","=","[","\"cased_L-12_H-768_A-12\"",",","\"cased_L-24_H-1024_A-16\"",",","\"multi_cased_L-12_H-768_A-12\"","]","is_bad_config","=","False","if","model_name","in","lower_models","and","not","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"False\"","case_name","=","\"lowercased\"","opposite_flag","=","\"True\"","if","model_name","in","cased_models","and","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"True\"","case_name","=","\"cased\"","opposite_flag","=","\"False\"","if","is_bad_config",":","raise","ValueError","(","\"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"","\"However, `%s` seems to be a %s model, so you \"","\"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"","\"how the model was pre-training. 
If this error is wrong, please \"","\"just comment out this check.\"","%","(","actual_flag",",","init_checkpoint",",","model_name",",","case_name",",","opposite_flag",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L28-L75"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L78-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n 
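The casing check above keys entirely off the directory name in the checkpoint path; the regular expression can be exercised on its own (the path below is hypothetical):

import re

init_checkpoint = "models/uncased_L-12_H-768_A-12/bert_model.ckpt"  # hypothetical
m = re.match(r"^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m:
    print(m.group(1))  # uncased_L-12_H-768_A-12 -> implies --do_lower_case=True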
raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L98-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","index","=","0","with","tf",".","gfile",".","GFile","(","vocab_file",",","\"r\"",")","as","reader",":","while","True",":","token","=","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","token",":","break","token","=","token",".","strip","(",")","vocab","[","token","]","=","index","index","+=","1","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L121-L133"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"convert_by_vocab","parameters":"(vocab, items)","argument_list":"","return_statement":"return output","docstring":"Converts a sequence of [tokens|ids] using the vocab.","docstring_summary":"Converts a sequence of [tokens|ids] using the vocab.","docstring_tokens":["Converts","a","sequence","of","[","tokens|ids","]","using","the","vocab","."],"function":"def convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n output.append(vocab[item])\n return 
output","function_tokens":["def","convert_by_vocab","(","vocab",",","items",")",":","output","=","[","]","for","item","in","items",":","output",".","append","(","vocab","[","item","]",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L136-L141"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","piece","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L152-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically contorl characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L362-L371"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters 
but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat","in","(","\"Cc\"",",","\"Cf\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L374-L383"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L386-L399"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = 
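All three character classifiers (_is_whitespace, _is_control, _is_punctuation) lean on unicodedata.category; a quick spot check also shows why the extra ASCII-range test in _is_punctuation is needed:

import unicodedata

print(unicodedata.category(" "))       # Zs -> whitespace
print(unicodedata.category("\u00a0"))  # Zs -> non-breaking space counts too
print(unicodedata.category("\x07"))    # Cc -> control character (BEL)
print(unicodedata.category(","))       # Po -> starts with 'P': punctuation
print(unicodedata.category("^"))       # Sk -> not 'P*'; only the ASCII ranges catch it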
do_lower_case","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",")",":","self",".","do_lower_case","=","do_lower_case"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L188-L194"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text.","docstring_summary":"Tokenizes a piece of text.","docstring_tokens":["Tokenizes","a","piece","of","text","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. 
This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L196-L218"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L220-L229"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text)","argument_list":"","return_statement":"return [\"\".join(x) for x in output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in 
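_run_strip_accents relies on NFD normalization splitting base characters from their combining marks (category Mn), which are then dropped. Standalone:

import unicodedata

def strip_accents(text):
    # NFD decomposes 'é' into 'e' plus a combining acute accent (category Mn).
    return "".join(c for c in unicodedata.normalize("NFD", text)
                   if unicodedata.category(c) != "Mn")

print(strip_accents("café"))   # cafe
print(strip_accents("naïve"))  # naive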
output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",")",":","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L231-L249"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L251-L262"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like the all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L264-L284"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L286-L297"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tokenization.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a 
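For reference, the codepoint ranges tested by `_is_chinese_char` can be restated as data, which makes the intent of the chained comparisons easier to scan. A small sketch, with the range list copied from the function above:

```python
# The CJK ranges tested by BasicTokenizer._is_chinese_char, as
# (start, end) pairs. Hangul, Hiragana, and Katakana are deliberately
# NOT included, matching the comment in the source.
_CJK_RANGES = [
    (0x4E00, 0x9FFF),   (0x3400, 0x4DBF),
    (0x20000, 0x2A6DF), (0x2A700, 0x2B73F),
    (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
    (0xF900, 0xFAFF),   (0x2F800, 0x2FA1F),
]

def is_cjk_char(ch):
    cp = ord(ch)
    return any(lo <= cp <= hi for lo, hi in _CJK_RANGES)

print(is_cjk_char("中"))  # True  (U+4E2D, CJK Unified Ideographs)
print(is_cjk_char("あ"))  # False (Hiragana is a separate block)
```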
piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tokenization.py#L308-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tf_metrics.py","language":"python","identifier":"precision","parameters":"(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro')","argument_list":"","return_statement":"return (pr, op)","docstring":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n 
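The greedy longest-match-first loop in `WordpieceTokenizer.tokenize` is the heart of wordpiece tokenization. Here is a minimal sketch of just that loop with a toy vocabulary (the vocabulary is invented for illustration; the real tokenizer loads it from a vocab file and also applies the `max_input_chars_per_word` guard):

```python
def wordpiece(token, vocab, unk="[UNK]"):
    # Greedy longest-match-first: at each position, take the longest
    # substring (prefixed with "##" after the first piece) that is in
    # the vocabulary; fall back to UNK if any position fails.
    chars, start, pieces = list(token), 0, []
    while start < len(chars):
        end, cur = len(chars), None
        while start < end:
            sub = "".join(chars[start:end])
            if start > 0:
                sub = "##" + sub
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]          # no piece matched: whole token is UNK
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##aff", "##able", "##a"}
print(wordpiece("unaffable", vocab))  # -> ['un', '##aff', '##able']
```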
num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","precision","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def precision(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro'):\n \"\"\"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is 
all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n pr, _, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n op, _, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (pr, op)","function_tokens":["def","precision","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","pr",",","_",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","op",",","_",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","pr",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tf_metrics.py#L15-L50"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tf_metrics.py","language":"python","identifier":"recall","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro')","argument_list":"","return_statement":"return (re, op)","docstring":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","recall","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def recall(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro'):\n \"\"\"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, re, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n _, op, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (re, op)","function_tokens":["def","recall","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","re",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","_",",","op",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","re",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tf_metrics.py#L53-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tf_metrics.py","language":"python","identifier":"fbeta","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1)","argument_list":"","return_statement":"return (fbeta, op)","docstring":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
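The `average` argument has the same meaning in `precision`, `recall`, and `fbeta`. A NumPy sketch of the three modes on a hand-made 3-class confusion matrix (rows = gold, columns = predicted; the numbers are invented) makes the distinction concrete:

```python
import numpy as np

cm = np.array([[5, 1, 0],
               [2, 3, 1],
               [0, 2, 6]], dtype=float)

tp = np.diag(cm)
per_class_pr = tp / cm.sum(axis=0)   # precision per class
n_gold = cm.sum(axis=1)              # true-label count per class

micro_pr = tp.sum() / cm.sum()       # pool all counts, then divide
macro_pr = per_class_pr.mean()       # unweighted mean over classes
weighted_pr = (per_class_pr * n_gold).sum() / n_gold.sum()

# With all classes positive, micro precision equals accuracy.
print(micro_pr, macro_pr, weighted_pr)  # 0.70  ~0.690  ~0.707
```

Recall works the same way with `cm.sum(axis=1)` as the denominator.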
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","fbeta","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1):\n \"\"\"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, _, fbeta = metrics_from_confusion_matrix(\n cm, pos_indices, average=average, beta=beta)\n _, _, op = metrics_from_confusion_matrix(\n op, pos_indices, average=average, beta=beta)\n return (fbeta, op)","function_tokens":["def","fbeta","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",",","beta","=","1",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","_",",","fbeta","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",",","beta","=","beta",")","_",",","_",",","op","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",",","beta","=","beta",")","return","(","fbeta",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tf_metrics.py#L97-L134"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tf_metrics.py","language":"python","identifier":"safe_div","parameters":"(numerator, denominator)","argument_list":"","return_statement":"return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","docstring":"Safe division, return 0 if denominator is 0","docstring_summary":"Safe division, return 0 if denominator is 0","docstring_tokens":["Safe","division","return","0","if","denominator","is","0"],"function":"def safe_div(numerator, denominator):\n \"\"\"Safe division, return 0 if denominator is 0\"\"\"\n numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)\n zeros = tf.zeros_like(numerator, dtype=numerator.dtype)\n denominator_is_zero = tf.equal(denominator, zeros)\n return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","function_tokens":["def","safe_div","(","numerator",",","denominator",")",":","numerator",",","denominator","=","tf",".","to_float","(","numerator",")",",","tf",".","to_float","(","denominator",")","zeros","=","tf",".","zeros_like","(","numerator",",","dtype","=","numerator",".","dtype",")","denominator_is_zero","=","tf",".","equal","(","denominator",",","zeros",")","return","tf",".","where","(","denominator_is_zero",",","zeros",",","numerator","\/","denominator",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tf_metrics.py#L137-L142"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tf_metrics.py","language":"python","identifier":"pr_re_fbeta","parameters":"(cm, pos_indices, beta=1)","argument_list":"","return_statement":"return pr, re, fbeta","docstring":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_summary":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_tokens":["Uses","a","confusion","matrix","to","compute","precision","recall","and","fbeta"],"function":"def pr_re_fbeta(cm, pos_indices, beta=1):\n \"\"\"Uses a confusion matrix to 
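`safe_div` exists because a class with no predictions (or no gold labels) would otherwise divide by zero and produce NaN in the metric. A NumPy analogue of the same guard, for illustration:

```python
import numpy as np

def safe_div(numerator, denominator):
    # Elementwise numerator / denominator, with 0 wherever the
    # denominator is 0 (mirroring the tf.where-based safe_div above).
    numerator = np.asarray(numerator, dtype=float)
    denominator = np.asarray(denominator, dtype=float)
    out = np.zeros_like(numerator)
    np.divide(numerator, denominator, out=out, where=denominator != 0)
    return out

print(safe_div([1.0, 2.0], [4.0, 0.0]))  # [0.25 0.  ]
```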
compute precision, recall and fbeta\"\"\"\n num_classes = cm.shape[0]\n neg_indices = [i for i in range(num_classes) if i not in pos_indices]\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, neg_indices] = 0\n diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask))\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[:, neg_indices] = 0\n tot_pred = tf.reduce_sum(cm * cm_mask)\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, :] = 0\n tot_gold = tf.reduce_sum(cm * cm_mask)\n\n pr = safe_div(diag_sum, tot_pred)\n re = safe_div(diag_sum, tot_gold)\n fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re)\n\n return pr, re, fbeta","function_tokens":["def","pr_re_fbeta","(","cm",",","pos_indices",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","neg_indices","=","[","i","for","i","in","range","(","num_classes",")","if","i","not","in","pos_indices","]","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",","neg_indices","]","=","0","diag_sum","=","tf",".","reduce_sum","(","tf",".","diag_part","(","cm","*","cm_mask",")",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[",":",",","neg_indices","]","=","0","tot_pred","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",",":","]","=","0","tot_gold","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","pr","=","safe_div","(","diag_sum",",","tot_pred",")","re","=","safe_div","(","diag_sum",",","tot_gold",")","fbeta","=","safe_div","(","(","1.","+","beta","**","2",")","*","pr","*","re",",","beta","**","2","*","pr","+","re",")","return","pr",",","re",",","fbeta"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tf_metrics.py#L145-L165"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/tf_metrics.py","language":"python","identifier":"metrics_from_confusion_matrix","parameters":"(cm, pos_indices=None, average='micro',\n beta=1)","argument_list":"","return_statement":"","docstring":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'","docstring_summary":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 
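`pr_re_fbeta` computes everything from masked views of the confusion matrix: true positives come from the diagonal entries of the positive classes, predicted totals from the positive columns, and gold totals from the positive rows. A NumPy restatement of that masking logic (a sketch, not the TF implementation):

```python
import numpy as np

def pr_re_fbeta_np(cm, pos_indices, beta=1.0):
    cm = np.asarray(cm, dtype=float)
    pos = np.asarray(pos_indices)
    tp = cm[pos, pos].sum()        # diagonal entries of positive classes
    tot_pred = cm[:, pos].sum()    # everything predicted as a positive class
    tot_gold = cm[pos, :].sum()    # everything whose gold label is positive
    pr = tp / tot_pred if tot_pred else 0.0
    re = tp / tot_gold if tot_gold else 0.0
    denom = beta**2 * pr + re
    fb = (1.0 + beta**2) * pr * re / denom if denom else 0.0
    return pr, re, fb

cm = [[5, 1, 0], [2, 3, 1], [0, 2, 6]]
print(pr_re_fbeta_np(cm, pos_indices=[0, 2]))  # (0.7857..., 0.7857..., 0.7857...)
```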
'weighted'","docstring_tokens":["Precision","Recall","and","F1","from","the","confusion","matrix","Parameters","----------","cm",":","tf",".","Tensor","of","type","tf",".","int32","of","shape","(","num_classes","num_classes",")","The","streaming","confusion","matrix",".","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","average",":","str","optional","micro","macro","or","weighted"],"function":"def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro',\n beta=1):\n \"\"\"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'\n \"\"\"\n num_classes = cm.shape[0]\n if pos_indices is None:\n pos_indices = [i for i in range(num_classes)]\n\n if average == 'micro':\n return pr_re_fbeta(cm, pos_indices, beta)\n elif average in {'macro', 'weighted'}:\n precisions, recalls, fbetas, n_golds = [], [], [], []\n for idx in pos_indices:\n pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)\n precisions.append(pr)\n recalls.append(re)\n fbetas.append(fbeta)\n cm_mask = np.zeros([num_classes, num_classes])\n cm_mask[idx, :] = 1\n n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))\n\n if average == 'macro':\n pr = tf.reduce_mean(precisions)\n re = tf.reduce_mean(recalls)\n fbeta = tf.reduce_mean(fbetas)\n return pr, re, fbeta\n if average == 'weighted':\n n_gold = tf.reduce_sum(n_golds)\n pr_sum = sum(p * n for p, n in zip(precisions, n_golds))\n pr = safe_div(pr_sum, n_gold)\n re_sum = sum(r * n for r, n in zip(recalls, n_golds))\n re = safe_div(re_sum, n_gold)\n fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))\n fbeta = safe_div(fbeta_sum, n_gold)\n return pr, re, fbeta\n\n else:\n raise 
NotImplementedError()","function_tokens":["def","metrics_from_confusion_matrix","(","cm",",","pos_indices","=","None",",","average","=","'micro'",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","if","pos_indices","is","None",":","pos_indices","=","[","i","for","i","in","range","(","num_classes",")","]","if","average","==","'micro'",":","return","pr_re_fbeta","(","cm",",","pos_indices",",","beta",")","elif","average","in","{","'macro'",",","'weighted'","}",":","precisions",",","recalls",",","fbetas",",","n_golds","=","[","]",",","[","]",",","[","]",",","[","]","for","idx","in","pos_indices",":","pr",",","re",",","fbeta","=","pr_re_fbeta","(","cm",",","[","idx","]",",","beta",")","precisions",".","append","(","pr",")","recalls",".","append","(","re",")","fbetas",".","append","(","fbeta",")","cm_mask","=","np",".","zeros","(","[","num_classes",",","num_classes","]",")","cm_mask","[","idx",",",":","]","=","1","n_golds",".","append","(","tf",".","to_float","(","tf",".","reduce_sum","(","cm","*","cm_mask",")",")",")","if","average","==","'macro'",":","pr","=","tf",".","reduce_mean","(","precisions",")","re","=","tf",".","reduce_mean","(","recalls",")","fbeta","=","tf",".","reduce_mean","(","fbetas",")","return","pr",",","re",",","fbeta","if","average","==","'weighted'",":","n_gold","=","tf",".","reduce_sum","(","n_golds",")","pr_sum","=","sum","(","p","*","n","for","p",",","n","in","zip","(","precisions",",","n_golds",")",")","pr","=","safe_div","(","pr_sum",",","n_gold",")","re_sum","=","sum","(","r","*","n","for","r",",","n","in","zip","(","recalls",",","n_golds",")",")","re","=","safe_div","(","re_sum",",","n_gold",")","fbeta_sum","=","sum","(","f","*","n","for","f",",","n","in","zip","(","fbetas",",","n_golds",")",")","fbeta","=","safe_div","(","fbeta_sum",",","n_gold",")","return","pr",",","re",",","fbeta","else",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/tf_metrics.py#L168-L215"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * cdf","docstring":"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.","docstring_summary":"Gaussian Error Linear Unit.","docstring_tokens":["Gaussian","Error","Linear","Unit","."],"function":"def gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 \/ np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","tf",".","tanh","(","(","np",".","sqrt","(","2","\/","np",".","pi",")","*","(","x","+","0.044715","*","tf",".","pow","(","x",",","3",")",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L264-L277"} 
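The `gelu` above uses the tanh approximation from the original paper rather than the exact `x * Φ(x)` form. A quick NumPy check that the two agree closely:

```python
import math
import numpy as np

def gelu_tanh(x):
    # The approximation used above:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    x = np.asarray(x, dtype=float)
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi)
                                    * (x + 0.044715 * x**3)))

def gelu_exact(x):
    # Exact form: x * Phi(x), Phi = standard normal CDF.
    return x * 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

for v in (-1.0, 0.0, 1.0, 2.0):
    print(v, gelu_tanh(v), gelu_exact(v))  # agree to ~1e-3 or better
```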
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"get_activation","parameters":"(activation_string)","argument_list":"","return_statement":"","docstring":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.","docstring_summary":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.","docstring_tokens":["Maps","a","string","to","a","Python","function","e",".","g",".","relu","=",">","tf",".","nn",".","relu","."],"function":"def get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)","function_tokens":["def","get_activation","(","activation_string",")",":","# We assume that anything that\"s not a string is already an activation","# function, so we just return it.","if","not","isinstance","(","activation_string",",","six",".","string_types",")",":","return","activation_string","if","not","activation_string",":","return","None","act","=","activation_string",".","lower","(",")","if","act","==","\"linear\"",":","return","None","elif","act","==","\"relu\"",":","return","tf",".","nn",".","relu","elif","act","==","\"gelu\"",":","return","gelu","elif","act","==","\"tanh\"",":","return","tf",".","tanh","else",":","raise","ValueError","(","\"Unsupported activation: %s\"","%","act",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L280-L314"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"get_assignment_map_from_checkpoint","parameters":"(tvars, init_checkpoint)","argument_list":"","return_statement":"return (assignment_map, initialized_variable_names)","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the 
union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)","function_tokens":["def","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","assignment_map","=","{","}","initialized_variable_names","=","{","}","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","if","name","not","in","name_to_variable",":","continue","assignment_map","[","name","]","=","name","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","return","(","assignment_map",",","initialized_variable_names",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L317-L341"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"dropout","parameters":"(input_tensor, dropout_prob)","argument_list":"","return_statement":"return output","docstring":"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.","docstring_summary":"Perform dropout.","docstring_tokens":["Perform","dropout","."],"function":"def dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
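The subtle step in `get_assignment_map_from_checkpoint` is the regex that strips the `:output_index` suffix from graph variable names, since checkpoints store names without it; the assignment map is then keyed on the stripped name. A sketch of just that normalization (the example variable name is illustrative):

```python
import re

def strip_output_suffix(var_name):
    # In-graph TF1 variable names look like "scope/kernel:0"; the
    # checkpoint stores "scope/kernel", so drop the ":<index>" suffix.
    m = re.match(r"^(.*):\d+$", var_name)
    return m.group(1) if m else var_name

print(strip_output_suffix("encoder/layer_0/attention/query/kernel:0"))
# -> encoder/layer_0/attention/query/kernel
```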
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\n return output","function_tokens":["def","dropout","(","input_tensor",",","dropout_prob",")",":","if","dropout_prob","is","None","or","dropout_prob","==","0.0",":","return","input_tensor","output","=","tf",".","nn",".","dropout","(","input_tensor",",","1.0","-","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L344-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"layer_norm","parameters":"(input_tensor, name=None)","argument_list":"","return_statement":"return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","docstring":"Run layer normalization on the last dimension of the tensor.","docstring_summary":"Run layer normalization on the last dimension of the tensor.","docstring_tokens":["Run","layer","normalization","on","the","last","dimension","of","the","tensor","."],"function":"def layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","function_tokens":["def","layer_norm","(","input_tensor",",","name","=","None",")",":","return","tf",".","contrib",".","layers",".","layer_norm","(","inputs","=","input_tensor",",","begin_norm_axis","=","-","1",",","begin_params_axis","=","-","1",",","scope","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L362-L365"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"layer_norm_and_dropout","parameters":"(input_tensor, dropout_prob, name=None)","argument_list":"","return_statement":"return output_tensor","docstring":"Runs layer normalization followed by dropout.","docstring_summary":"Runs layer normalization followed by dropout.","docstring_tokens":["Runs","layer","normalization","followed","by","dropout","."],"function":"def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor","function_tokens":["def","layer_norm_and_dropout","(","input_tensor",",","dropout_prob",",","name","=","None",")",":","output_tensor","=","layer_norm","(","input_tensor",",","name",")","output_tensor","=","dropout","(","output_tensor",",","dropout_prob",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L368-L372"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"create_initializer","parameters":"(initializer_range=0.02)","argument_list":"","return_statement":"return 
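Note that `tf.nn.dropout` in TF1 takes a *keep* probability, hence the `1.0 - dropout_prob` above. A NumPy sketch of the same inverted-dropout convention, where survivors are scaled by `1/keep_prob` so the expected activation is unchanged:

```python
import numpy as np

def dropout_np(x, dropout_prob, rng=None):
    # Zero each element with probability dropout_prob and scale the
    # survivors by 1/keep_prob, matching tf.nn.dropout(x, 1 - p).
    if dropout_prob is None or dropout_prob == 0.0:
        return x
    rng = rng if rng is not None else np.random.default_rng(0)
    keep_prob = 1.0 - dropout_prob
    mask = rng.random(x.shape) < keep_prob
    return np.where(mask, x / keep_prob, 0.0)

print(dropout_np(np.ones((2, 4)), 0.5))  # ~half zeros, survivors == 2.0
```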
tf.truncated_normal_initializer(stddev=initializer_range)","docstring":"Creates a `truncated_normal_initializer` with the given range.","docstring_summary":"Creates a `truncated_normal_initializer` with the given range.","docstring_tokens":["Creates","a","truncated_normal_initializer","with","the","given","range","."],"function":"def create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)","function_tokens":["def","create_initializer","(","initializer_range","=","0.02",")",":","return","tf",".","truncated_normal_initializer","(","stddev","=","initializer_range",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L375-L377"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"embedding_lookup","parameters":"(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False)","argument_list":"","return_statement":"return (output, embedding_table)","docstring":"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].","docstring_summary":"Looks up words embeddings for id tensor.","docstring_tokens":["Looks","up","words","embeddings","for","id","tensor","."],"function":"def embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. 
If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.get_variable(\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n output = tf.matmul(one_hot_input_ids, embedding_table)\n else:\n output = tf.gather(embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * embedding_size])\n return (output, embedding_table)","function_tokens":["def","embedding_lookup","(","input_ids",",","vocab_size",",","embedding_size","=","128",",","initializer_range","=","0.02",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","False",")",":","# This function assumes that the input is of shape [batch_size, seq_length,","# num_inputs].","#","# If the input is a 2D tensor of shape [batch_size, seq_length], we","# reshape to [batch_size, seq_length, 1].","if","input_ids",".","shape",".","ndims","==","2",":","input_ids","=","tf",".","expand_dims","(","input_ids",",","axis","=","[","-","1","]",")","embedding_table","=","tf",".","get_variable","(","name","=","word_embedding_name",",","shape","=","[","vocab_size",",","embedding_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","flat_input_ids","=","tf",".","reshape","(","input_ids",",","[","-","1","]",")","if","use_one_hot_embeddings",":","one_hot_input_ids","=","tf",".","one_hot","(","flat_input_ids",",","depth","=","vocab_size",")","output","=","tf",".","matmul","(","one_hot_input_ids",",","embedding_table",")","else",":","output","=","tf",".","gather","(","embedding_table",",","flat_input_ids",")","input_shape","=","get_shape_list","(","input_ids",")","output","=","tf",".","reshape","(","output",",","input_shape","[","0",":","-","1","]","+","[","input_shape","[","-","1","]","*","embedding_size","]",")","return","(","output",",","embedding_table",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L380-L425"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"embedding_postprocessor","parameters":"(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1)","argument_list":"","return_statement":"return output","docstring":"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. 
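The two lookup paths in `embedding_lookup` are mathematically identical: multiplying a one-hot matrix by the table selects the same rows that `tf.gather` indexes directly (the one-hot path is simply friendlier to TPU matmul hardware). A NumPy equivalence check:

```python
import numpy as np

vocab_size, embedding_size = 10, 4
rng = np.random.default_rng(0)
table = rng.standard_normal((vocab_size, embedding_size))
ids = np.array([3, 7, 1])

gathered = table[ids]                 # "gather" path: direct row indexing
one_hot = np.eye(vocab_size)[ids]     # "one-hot" path
via_matmul = one_hot @ table          # matmul selects the same rows

print(np.allclose(gathered, via_matmul))  # True
```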
Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.","docstring_summary":"Performs various post-processing on a word embedding tensor.","docstring_tokens":["Performs","various","post","-","processing","on","a","word","embedding","tensor","."],"function":"def embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n\n output = layer_norm_and_dropout(output, dropout_prob)\n return output","function_tokens":["def","embedding_postprocessor","(","input_tensor",",","use_token_type","=","False",",","token_type_ids","=","None",",","token_type_vocab_size","=","16",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","0.02",",","max_position_embeddings","=","512",",","dropout_prob","=","0.1",")",":","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","width","=","input_shape","[","2","]","output","=","input_tensor","if","use_token_type",":","if","token_type_ids","is","None",":","raise","ValueError","(","\"`token_type_ids` must be specified if\"","\"`use_token_type` is True.\"",")","token_type_table","=","tf",".","get_variable","(","name","=","token_type_embedding_name",",","shape","=","[","token_type_vocab_size",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# This vocab will be small so we always do one-hot here, since it is always","# faster for a small vocabulary.","flat_token_type_ids","=","tf",".","reshape","(","token_type_ids",",","[","-","1","]",")","one_hot_ids","=","tf",".","one_hot","(","flat_token_type_ids",",","depth","=","token_type_vocab_size",")","token_type_embeddings","=","tf",".","matmul","(","one_hot_ids",",","token_type_table",")","token_type_embeddings","=","tf",".","reshape","(","token_type_embeddings",",","[","batch_size",",","seq_length",",","width","]",")","output","+=","token_type_embeddings","if","use_position_embeddings",":","assert_op","=","tf",".","assert_less_equal","(","seq_length",",","max_position_embeddings",")","with","tf",".","control_dependencies","(","[","assert_op","]",")",":","full_position_embeddings","=","tf",".","get_variable","(","name","=","position_embedding_name",",","shape","=","[","max_position_embeddings",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# Since the position embedding table is a learned variable, we create it","# using a (long) sequence length `max_position_embeddings`. The actual","# sequence length might be shorter than this, for faster training of","# tasks that do not have long sequences.","#","# So `full_position_embeddings` is effectively an embedding table","# for position [0, 1, 2, ..., max_position_embeddings-1], and the current","# sequence has positions [0, 1, 2, ... 
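The position-embedding arithmetic at the end of `embedding_postprocessor` reduces to slicing the first `seq_length` rows of the learned table and adding a leading broadcast axis of 1. A NumPy sketch of that slice-and-broadcast (the shapes are invented for illustration):

```python
import numpy as np

batch_size, seq_length, width = 2, 5, 8
max_position_embeddings = 512
rng = np.random.default_rng(0)

output = rng.standard_normal((batch_size, seq_length, width))
full_position_embeddings = rng.standard_normal(
    (max_position_embeddings, width))

# Slice positions [0, seq_length) and prepend a broadcast axis,
# mirroring the tf.slice + tf.reshape in embedding_postprocessor.
position_embeddings = full_position_embeddings[:seq_length].reshape(
    1, seq_length, width)
output = output + position_embeddings  # broadcasts over the batch axis

print(output.shape)  # (2, 5, 8)
```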
seq_length-1], so we can just","# perform a slice.","position_embeddings","=","tf",".","slice","(","full_position_embeddings",",","[","0",",","0","]",",","[","seq_length",",","-","1","]",")","num_dims","=","len","(","output",".","shape",".","as_list","(",")",")","# Only the last two dimensions are relevant (`seq_length` and `width`), so","# we broadcast among the first dimensions, which is typically just","# the batch size.","position_broadcast_shape","=","[","]","for","_","in","range","(","num_dims","-","2",")",":","position_broadcast_shape",".","append","(","1",")","position_broadcast_shape",".","extend","(","[","seq_length",",","width","]",")","position_embeddings","=","tf",".","reshape","(","position_embeddings",",","position_broadcast_shape",")","output","+=","position_embeddings","output","=","layer_norm_and_dropout","(","output",",","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L428-L521"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"create_attention_mask_from_input_mask","parameters":"(from_tensor, to_mask)","argument_list":"","return_statement":"return mask","docstring":"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].","docstring_summary":"Create 3D attention mask from a 2D tensor mask.","docstring_tokens":["Create","3D","attention","mask","from","a","2D","tensor","mask","."],"function":"def create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask","function_tokens":["def","create_attention_mask_from_input_mask","(","from_tensor",",","to_mask",")",":","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_shape","=","get_shape_list","(","to_mask",",","expected_rank","=","2",")","to_seq_length","=","to_shape","[","1","]","to_mask","=","tf",".","cast","(","tf",".","reshape","(","to_mask",",","[","batch_size",",","1",",","to_seq_length","]",")",",","tf",".","float32",")","# We don't assume that `from_tensor` is a mask (although it could be). We","# don't actually care if we attend *from* padding tokens (only *to* padding)","# tokens so we create a tensor of all ones.","#","# `broadcast_ones` = [batch_size, from_seq_length, 1]","broadcast_ones","=","tf",".","ones","(","shape","=","[","batch_size",",","from_seq_length",",","1","]",",","dtype","=","tf",".","float32",")","# Here we broadcast along two dimensions to create the mask.","mask","=","broadcast_ones","*","to_mask","return","mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L524-L555"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"attention_layer","parameters":"(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None)","argument_list":"","return_statement":"return context_layer","docstring":"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
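Aside (not part of the records): create_attention_mask_from_input_mask above turns a 2D padding mask into a 3D attention mask purely by broadcasting. A minimal NumPy sketch of the same shape logic, with illustrative values:

import numpy as np

batch, from_len, to_len = 2, 3, 5
to_mask = np.array([[1, 1, 1, 0, 0],
                    [1, 1, 1, 1, 0]], dtype=np.float32)           # [B, T]
broadcast_ones = np.ones((batch, from_len, 1), dtype=np.float32)  # [B, F, 1]
mask = broadcast_ones * to_mask[:, None, :]                        # [B, F, T]
assert mask.shape == (batch, from_len, to_len)
# Every "from" position attends to the same unpadded "to" positions.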
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.","docstring_summary":"Performs multi-headed attention from `from_tensor` to `to_tensor`.","docstring_tokens":["Performs","multi","-","headed","attention","from","from_tensor","to","to_tensor","."],"function":"def attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = 
transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 \/ math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer","function_tokens":["def","attention_layer","(","from_tensor",",","to_tensor",",","attention_mask","=","None",",","num_attention_heads","=","1",",","size_per_head","=","512",",","query_act","=","None",",","key_act","=","None",",","value_act","=","None",",","attention_probs_dropout_prob","=","0.0",",","initializer_range","=","0.02",",","do_return_2d_tensor","=","False",",","batch_size","=","None",",","from_seq_length","=","None",",","to_seq_length","=","None",")",":","def","transpose_for_scores","(","input_tensor",",","batch_size",",","num_attention_heads",",","seq_length",",","width",")",":","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","batch_size",",","seq_length",",","num_attention_heads",",","width","]",")","output_tensor","=","tf",".","transpose","(","output_tensor",",","[","0",",","2",",","1",",","3","]",")","return","output_tensor","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","to_shape","=","get_shape_list","(","to_tensor",",","expected_rank","=","[","2",",","3","]",")","if","len","(","from_shape",")","!=","len","(","to_shape",")",":","raise","ValueError","(","\"The rank of `from_tensor` must match the rank of 
`to_tensor`.\"",")","if","len","(","from_shape",")","==","3",":","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_seq_length","=","to_shape","[","1","]","elif","len","(","from_shape",")","==","2",":","if","(","batch_size","is","None","or","from_seq_length","is","None","or","to_seq_length","is","None",")",":","raise","ValueError","(","\"When passing in rank 2 tensors to attention_layer, the values \"","\"for `batch_size`, `from_seq_length`, and `to_seq_length` \"","\"must all be specified.\"",")","# Scalar dimensions referenced here:","# B = batch size (number of sequences)","# F = `from_tensor` sequence length","# T = `to_tensor` sequence length","# N = `num_attention_heads`","# H = `size_per_head`","from_tensor_2d","=","reshape_to_matrix","(","from_tensor",")","to_tensor_2d","=","reshape_to_matrix","(","to_tensor",")","# `query_layer` = [B*F, N*H]","query_layer","=","tf",".","layers",".","dense","(","from_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","query_act",",","name","=","\"query\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `key_layer` = [B*T, N*H]","key_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","key_act",",","name","=","\"key\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `value_layer` = [B*T, N*H]","value_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","value_act",",","name","=","\"value\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `query_layer` = [B, N, F, H]","query_layer","=","transpose_for_scores","(","query_layer",",","batch_size",",","num_attention_heads",",","from_seq_length",",","size_per_head",")","# `key_layer` = [B, N, T, H]","key_layer","=","transpose_for_scores","(","key_layer",",","batch_size",",","num_attention_heads",",","to_seq_length",",","size_per_head",")","# Take the dot product between \"query\" and \"key\" to get the raw","# attention scores.","# `attention_scores` = [B, N, F, T]","attention_scores","=","tf",".","matmul","(","query_layer",",","key_layer",",","transpose_b","=","True",")","attention_scores","=","tf",".","multiply","(","attention_scores",",","1.0","\/","math",".","sqrt","(","float","(","size_per_head",")",")",")","if","attention_mask","is","not","None",":","# `attention_mask` = [B, 1, F, T]","attention_mask","=","tf",".","expand_dims","(","attention_mask",",","axis","=","[","1","]",")","# Since attention_mask is 1.0 for positions we want to attend and 0.0 for","# masked positions, this operation will create a tensor which is 0.0 for","# positions we want to attend and -10000.0 for masked positions.","adder","=","(","1.0","-","tf",".","cast","(","attention_mask",",","tf",".","float32",")",")","*","-","10000.0","# Since we are adding it to the raw scores before the softmax, this is","# effectively the same as removing these entirely.","attention_scores","+=","adder","# Normalize the attention scores to probabilities.","# `attention_probs` = [B, N, F, T]","attention_probs","=","tf",".","nn",".","softmax","(","attention_scores",")","# This is actually dropping out entire tokens to attend to, which might","# seem a bit unusual, but is taken from the original Transformer paper.","attention_probs","=","dropout","(","attention_probs",",","attention_probs_dropout_prob",")","# `value_layer` = 
[B, T, N, H]","value_layer","=","tf",".","reshape","(","value_layer",",","[","batch_size",",","to_seq_length",",","num_attention_heads",",","size_per_head","]",")","# `value_layer` = [B, N, T, H]","value_layer","=","tf",".","transpose","(","value_layer",",","[","0",",","2",",","1",",","3","]",")","# `context_layer` = [B, N, F, H]","context_layer","=","tf",".","matmul","(","attention_probs",",","value_layer",")","# `context_layer` = [B, F, N, H]","context_layer","=","tf",".","transpose","(","context_layer",",","[","0",",","2",",","1",",","3","]",")","if","do_return_2d_tensor",":","# `context_layer` = [B*F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size","*","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","else",":","# `context_layer` = [B, F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size",",","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","return","context_layer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L558-L751"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"transformer_model","parameters":"(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False)","argument_list":"","return_statement":"","docstring":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. 
Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.","docstring_summary":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".","docstring_tokens":["Multi","-","headed","multi","-","layer","Transformer","from","Attention","is","All","You","Need","."],"function":"def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size \/ num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on\n # the GPU\/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output","function_tokens":["def","transformer_model","(","input_tensor",",","attention_mask","=","None",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","intermediate_act_fn","=","gelu",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","initializer_range","=","0.02",",","do_return_all_layers","=","False",")",":","if","hidden_size","%","num_attention_heads","!=","0",":","raise","ValueError","(","\"The hidden size (%d) is not a multiple of the number of attention \"","\"heads 
(%d)\"","%","(","hidden_size",",","num_attention_heads",")",")","attention_head_size","=","int","(","hidden_size","\/","num_attention_heads",")","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","input_width","=","input_shape","[","2","]","# The Transformer performs sum residuals on all layers so the input needs","# to be the same as the hidden size.","if","input_width","!=","hidden_size",":","raise","ValueError","(","\"The width of the input tensor (%d) != hidden size (%d)\"","%","(","input_width",",","hidden_size",")",")","# We keep the representation as a 2D tensor to avoid re-shaping it back and","# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on","# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to","# help the optimizer.","prev_output","=","reshape_to_matrix","(","input_tensor",")","all_layer_outputs","=","[","]","for","layer_idx","in","range","(","num_hidden_layers",")",":","with","tf",".","variable_scope","(","\"layer_%d\"","%","layer_idx",")",":","layer_input","=","prev_output","with","tf",".","variable_scope","(","\"attention\"",")",":","attention_heads","=","[","]","with","tf",".","variable_scope","(","\"self\"",")",":","attention_head","=","attention_layer","(","from_tensor","=","layer_input",",","to_tensor","=","layer_input",",","attention_mask","=","attention_mask",",","num_attention_heads","=","num_attention_heads",",","size_per_head","=","attention_head_size",",","attention_probs_dropout_prob","=","attention_probs_dropout_prob",",","initializer_range","=","initializer_range",",","do_return_2d_tensor","=","True",",","batch_size","=","batch_size",",","from_seq_length","=","seq_length",",","to_seq_length","=","seq_length",")","attention_heads",".","append","(","attention_head",")","attention_output","=","None","if","len","(","attention_heads",")","==","1",":","attention_output","=","attention_heads","[","0","]","else",":","# In the case where we have other sequences, we just concatenate","# them to the self-attention head before the projection.","attention_output","=","tf",".","concat","(","attention_heads",",","axis","=","-","1",")","# Run a linear projection of `hidden_size` then add a residual","# with `layer_input`.","with","tf",".","variable_scope","(","\"output\"",")",":","attention_output","=","tf",".","layers",".","dense","(","attention_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","attention_output","=","dropout","(","attention_output",",","hidden_dropout_prob",")","attention_output","=","layer_norm","(","attention_output","+","layer_input",")","# The activation is only applied to the \"intermediate\" hidden layer.","with","tf",".","variable_scope","(","\"intermediate\"",")",":","intermediate_output","=","tf",".","layers",".","dense","(","attention_output",",","intermediate_size",",","activation","=","intermediate_act_fn",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# Down-project back to `hidden_size` then add the 
residual.","with","tf",".","variable_scope","(","\"output\"",")",":","layer_output","=","tf",".","layers",".","dense","(","intermediate_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","layer_output","=","dropout","(","layer_output",",","hidden_dropout_prob",")","layer_output","=","layer_norm","(","layer_output","+","attention_output",")","prev_output","=","layer_output","all_layer_outputs",".","append","(","layer_output",")","if","do_return_all_layers",":","final_outputs","=","[","]","for","layer_output","in","all_layer_outputs",":","final_output","=","reshape_from_matrix","(","layer_output",",","input_shape",")","final_outputs",".","append","(","final_output",")","return","final_outputs","else",":","final_output","=","reshape_from_matrix","(","prev_output",",","input_shape",")","return","final_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L754-L892"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"get_shape_list","parameters":"(tensor, expected_rank=None, name=None)","argument_list":"","return_statement":"return shape","docstring":"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.","docstring_summary":"Returns a list of the shape of tensor, preferring static dimensions.","docstring_tokens":["Returns","a","list","of","the","shape","of","tensor","preferring","static","dimensions","."],"function":"def get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape","function_tokens":["def","get_shape_list","(","tensor",",","expected_rank","=","None",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","if","expected_rank","is","not","None",":","assert_rank","(","tensor",",","expected_rank",",","name",")","shape","=","tensor",".","shape",".","as_list","(",")","non_static_indexes","=","[","]","for","(","index",",","dim",")","in","enumerate","(","shape",")",":","if","dim","is","None",":","non_static_indexes",".","append","(","index",")","if","not","non_static_indexes",":","return","shape","dyn_shape","=","tf",".","shape","(","tensor",")","for","index","in","non_static_indexes",":","shape","[","index","]","=","dyn_shape","[","index","]","return","shape"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L895-L929"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"reshape_to_matrix","parameters":"(input_tensor)","argument_list":"","return_statement":"return output_tensor","docstring":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_summary":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_tokens":["Reshapes","a",">","=","rank","2","tensor","to","a","rank","2","tensor","(","i",".","e",".","a","matrix",")","."],"function":"def reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor","function_tokens":["def","reshape_to_matrix","(","input_tensor",")",":","ndims","=","input_tensor",".","shape",".","ndims","if","ndims","<","2",":","raise","ValueError","(","\"Input tensor must have at least rank 2. 
Shape = %s\"","%","(","input_tensor",".","shape",")",")","if","ndims","==","2",":","return","input_tensor","width","=","input_tensor",".","shape","[","-","1","]","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","-","1",",","width","]",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L932-L943"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"reshape_from_matrix","parameters":"(output_tensor, orig_shape_list)","argument_list":"","return_statement":"return tf.reshape(output_tensor, orig_dims + [width])","docstring":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_summary":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_tokens":["Reshapes","a","rank","2","tensor","back","to","its","original","rank",">","=","2","tensor","."],"function":"def reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])","function_tokens":["def","reshape_from_matrix","(","output_tensor",",","orig_shape_list",")",":","if","len","(","orig_shape_list",")","==","2",":","return","output_tensor","output_shape","=","get_shape_list","(","output_tensor",")","orig_dims","=","orig_shape_list","[","0",":","-","1","]","width","=","output_shape","[","-","1","]","return","tf",".","reshape","(","output_tensor",",","orig_dims","+","[","width","]",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L946-L956"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"assert_rank","parameters":"(tensor, expected_rank, name=None)","argument_list":"","return_statement":"","docstring":"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.","docstring_summary":"Raises an exception if the tensor rank is not of the expected rank.","docstring_tokens":["Raises","an","exception","if","the","tensor","rank","is","not","of","the","expected","rank","."],"function":"def assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = tf.get_variable_scope().name\n 
raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))","function_tokens":["def","assert_rank","(","tensor",",","expected_rank",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","expected_rank_dict","=","{","}","if","isinstance","(","expected_rank",",","six",".","integer_types",")",":","expected_rank_dict","[","expected_rank","]","=","True","else",":","for","x","in","expected_rank",":","expected_rank_dict","[","x","]","=","True","actual_rank","=","tensor",".","shape",".","ndims","if","actual_rank","not","in","expected_rank_dict",":","scope_name","=","tf",".","get_variable_scope","(",")",".","name","raise","ValueError","(","\"For the tensor `%s` in scope `%s`, the actual rank \"","\"`%d` (shape = %s) is not equal to the expected rank `%s`\"","%","(","name",",","scope_name",",","actual_rank",",","str","(","tensor",".","shape",")",",","str","(","expected_rank",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L959-L986"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"BertConfig.__init__","parameters":"(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range","function_tokens":["def","__init__","(","self",",","vocab_size",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","16",",","initializer_range","=","0.02",")",":","self",".","vocab_size","=","vocab_size","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L34-L80"} 
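Aside (not part of the records): the BertConfig constructor above takes only vocab_size as required and sensible BERT-base defaults for the rest. A minimal usage sketch; the import path is hypothetical (inferred from the record URLs, assuming the repo directories are importable), and the vocab size is just an illustrative value:

from baselines.models.ernie import modeling  # hypothetical import path, per the URLs above

config = modeling.BertConfig(
    vocab_size=21128,        # illustrative, e.g. a Chinese-BERT-sized vocabulary
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072)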
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"BertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size","=","None",")","for","(","key",",","value",")","in","six",".","iteritems","(","json_object",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L83-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"BertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","tf",".","gfile",".","GFile","(","json_file",",","\"r\"",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L91-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"BertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L97-L100"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"BertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L102-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"BertModel.__init__","parameters":"(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None)","argument_list":"","return_statement":"","docstring":"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.","docstring_summary":"Constructor for BertModel.","docstring_tokens":["Constructor","for","BertModel","."],"function":"def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (self.embedding_output, self.embedding_table) = embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n\n self.sequence_output = self.all_encoder_layers[-1]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.variable_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. 
We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = tf.layers.dense(\n first_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(config.initializer_range))","function_tokens":["def","__init__","(","self",",","config",",","is_training",",","input_ids",",","input_mask","=","None",",","token_type_ids","=","None",",","use_one_hot_embeddings","=","False",",","scope","=","None",")",":","config","=","copy",".","deepcopy","(","config",")","if","not","is_training",":","config",".","hidden_dropout_prob","=","0.0","config",".","attention_probs_dropout_prob","=","0.0","input_shape","=","get_shape_list","(","input_ids",",","expected_rank","=","2",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","if","input_mask","is","None",":","input_mask","=","tf",".","ones","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","if","token_type_ids","is","None",":","token_type_ids","=","tf",".","zeros","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","with","tf",".","variable_scope","(","scope",",","default_name","=","\"bert\"",")",":","with","tf",".","variable_scope","(","\"embeddings\"",")",":","# Perform embedding lookup on the word ids.","(","self",".","embedding_output",",","self",".","embedding_table",")","=","embedding_lookup","(","input_ids","=","input_ids",",","vocab_size","=","config",".","vocab_size",",","embedding_size","=","config",".","hidden_size",",","initializer_range","=","config",".","initializer_range",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# Add positional embeddings and token type embeddings, then layer","# normalize and perform dropout.","self",".","embedding_output","=","embedding_postprocessor","(","input_tensor","=","self",".","embedding_output",",","use_token_type","=","True",",","token_type_ids","=","token_type_ids",",","token_type_vocab_size","=","config",".","type_vocab_size",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","config",".","initializer_range",",","max_position_embeddings","=","config",".","max_position_embeddings",",","dropout_prob","=","config",".","hidden_dropout_prob",")","with","tf",".","variable_scope","(","\"encoder\"",")",":","# This converts a 2D mask of shape [batch_size, seq_length] to a 3D","# mask of shape [batch_size, seq_length, seq_length] which is used","# for the attention scores.","attention_mask","=","create_attention_mask_from_input_mask","(","input_ids",",","input_mask",")","# Run the stacked transformer.","# `sequence_output` shape = [batch_size, seq_length, 
hidden_size].","self",".","all_encoder_layers","=","transformer_model","(","input_tensor","=","self",".","embedding_output",",","attention_mask","=","attention_mask",",","hidden_size","=","config",".","hidden_size",",","num_hidden_layers","=","config",".","num_hidden_layers",",","num_attention_heads","=","config",".","num_attention_heads",",","intermediate_size","=","config",".","intermediate_size",",","intermediate_act_fn","=","get_activation","(","config",".","hidden_act",")",",","hidden_dropout_prob","=","config",".","hidden_dropout_prob",",","attention_probs_dropout_prob","=","config",".","attention_probs_dropout_prob",",","initializer_range","=","config",".","initializer_range",",","do_return_all_layers","=","True",")","self",".","sequence_output","=","self",".","all_encoder_layers","[","-","1","]","# The \"pooler\" converts the encoded sequence tensor of shape","# [batch_size, seq_length, hidden_size] to a tensor of shape","# [batch_size, hidden_size]. This is necessary for segment-level","# (or segment-pair-level) classification tasks where we need a fixed","# dimensional representation of the segment.","with","tf",".","variable_scope","(","\"pooler\"",")",":","# We \"pool\" the model by simply taking the hidden state corresponding","# to the first token. We assume that this has been pre-trained","first_token_tensor","=","tf",".","squeeze","(","self",".","sequence_output","[",":",",","0",":","1",",",":","]",",","axis","=","1",")","self",".","pooled_output","=","tf",".","layers",".","dense","(","first_token_tensor",",","config",".","hidden_size",",","activation","=","tf",".","tanh",",","kernel_initializer","=","create_initializer","(","config",".","initializer_range",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L131-L232"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"BertModel.get_sequence_output","parameters":"(self)","argument_list":"","return_statement":"return self.sequence_output","docstring":"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.","docstring_summary":"Gets final hidden layer of encoder.","docstring_tokens":["Gets","final","hidden","layer","of","encoder","."],"function":"def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output","function_tokens":["def","get_sequence_output","(","self",")",":","return","self",".","sequence_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L237-L244"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/modeling.py","language":"python","identifier":"BertModel.get_embedding_output","parameters":"(self)","argument_list":"","return_statement":"return self.embedding_output","docstring":"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token 
type embeddings,\n then performing layer normalization. This is the input to the transformer.","docstring_summary":"Gets output of the embedding lookup (i.e., input to the transformer).","docstring_tokens":["Gets","output","of","the","embedding","lookup","(","i",".","e",".","input","to","the","transformer",")","."],"function":"def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output","function_tokens":["def","get_embedding_output","(","self",")",":","return","self",".","embedding_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/modeling.py#L249-L258"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"convert_example_list_for_inews","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature_list","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_example_list_for_inews(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return [InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)]\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n must_len = len(tokens_a) + 3\n extra_len = max_seq_length - must_len\n feature_list = []\n if example.text_b and extra_len > 0:\n extra_num = int((len(tokens_b) - 1) \/ extra_len) + 1\n for num in range(extra_num):\n max_len = min((num + 1) * extra_len, len(tokens_b))\n tokens_b_sub = tokens_b[num * extra_len: max_len]\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b_sub, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n else:\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n return 
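The inews conversion above splits an over-long text_b into sliding chunks, and extra_num is a ceiling division in disguise. A standalone illustration of that chunking arithmetic with toy values (all numbers invented for the example):

    tokens_b = list(range(10))  # pretend text_b tokenized into 10 sub-tokens
    extra_len = 4               # room left after tokens_a, [CLS] and two [SEP]s
    extra_num = (len(tokens_b) - 1) // extra_len + 1   # ceil(10 / 4) == 3
    chunks = [tokens_b[n * extra_len:(n + 1) * extra_len] for n in range(extra_num)]
    assert chunks == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
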
feature_list","function_tokens":["def","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","[","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","]","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","must_len","=","len","(","tokens_a",")","+","3","extra_len","=","max_seq_length","-","must_len","feature_list","=","[","]","if","example",".","text_b","and","extra_len",">","0",":","extra_num","=","int","(","(","len","(","tokens_b",")","-","1",")","\/","extra_len",")","+","1","for","num","in","range","(","extra_num",")",":","max_len","=","min","(","(","num","+","1",")","*","extra_len",",","len","(","tokens_b",")",")","tokens_b_sub","=","tokens_b","[","num","*","extra_len",":","max_len","]","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b_sub",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","else",":","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","return","feature_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L233-L268"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features_for_inews","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features_for_inews(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n num_example = 0\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature_list = convert_example_list_for_inews(ex_index, example, label_list,\n max_seq_length, tokenizer)\n num_example += len(feature_list)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n for feature in feature_list:\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n 
features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"feature num: %s\", num_example)\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features_for_inews","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","num_example","=","0","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","1000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature_list","=","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","num_example","+=","len","(","feature_list",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","for","feature","in","feature_list",":","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","tf",".","logging",".","info","(","\"feature num: %s\"",",","num_example",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L271-L301"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"convert_single_example","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # 
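Each feature row above ends up serialized through tf.train.Example. A self-contained sketch of that serialization step (the token ids are toy values, not real vocabulary entries):

    import collections
    import tensorflow as tf

    def create_int_feature(values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature([101, 2023, 102, 0])
    features["input_mask"] = create_int_feature([1, 1, 1, 0])
    features["segment_ids"] = create_int_feature([0, 0, 0, 0])
    features["label_ids"] = create_int_feature([1])
    example = tf.train.Example(features=tf.train.Features(feature=features))
    serialized = example.SerializeToString()  # bytes ready for a TFRecordWriter
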
length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return 
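The [CLS]/[SEP] convention documented above, worked through on the docstring's own example pair:

    tokens_a = ["is", "this", "jack", "##son", "##ville", "?"]
    tokens_b = ["no", "it", "is", "not", "."]
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    assert len(tokens) == len(segment_ids) == 14
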
feature","function_tokens":["def","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","max_seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","max_seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","label_id","=","label_map","[","example",".","label","]","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","tf",".","logging",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label_id",")",")","feature","=","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",",","is_real_example","=","True",")","return","feature"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L304-L403"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n 
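The mask-and-pad step above in isolation, with max_seq_length shortened so the result is readable (ids are toy values):

    max_seq_length = 8
    input_ids = [101, 7592, 102]           # toy token ids
    input_mask = [1] * len(input_ids)      # 1 = real token, 0 = padding
    segment_ids = [0] * len(input_ids)
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
    assert input_mask == [1, 1, 1, 0, 0, 0, 0, 0]
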
writer.close()","function_tokens":["def","file_based_convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L406-L433"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"file_based_input_fn_builder","parameters":"(input_file, seq_length, is_training,\n drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = 
tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","file_based_input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"label_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"is_real_example\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","}","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L436-L481"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
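The input_fn above is the standard TF 1.x TFRecord pipeline. Rebuilt standalone (filename, feature lengths, and batch size are illustrative; tf.contrib.data.map_and_batch is the fused op this TF 1.x code relies on):

    import tensorflow as tf

    name_to_features = {
        "input_ids": tf.FixedLenFeature([128], tf.int64),
        "label_ids": tf.FixedLenFeature([], tf.int64),
    }

    def parse_fn(record):
        return tf.parse_single_example(record, name_to_features)

    d = tf.data.TFRecordDataset("train.tf_record")  # hypothetical file
    d = d.repeat().shuffle(buffer_size=100)
    d = d.apply(tf.contrib.data.map_and_batch(
        parse_fn, batch_size=32, drop_remainder=True))
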
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L484-L498"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, 
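The longer-sequence-first truncation above, exercised on toy lists (the helper name here is a local stand-in for the module's private function):

    def truncate_seq_pair(tokens_a, tokens_b, max_length):
        # Pop from whichever side is currently longer, one token at a time.
        while len(tokens_a) + len(tokens_b) > max_length:
            (tokens_a if len(tokens_a) > len(tokens_b) else tokens_b).pop()

    a, b = list("abcdefgh"), list("xyz")
    truncate_seq_pair(a, b, 8)
    assert (len(a), len(b)) == (5, 3)   # only the longer side was trimmed
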
probabilities)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use model.get_sequence_output()","# instead.","output_layer","=","model",".","get_pooled_output","(",")","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L501-L543"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in 
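create_model's loss above is ordinary softmax cross-entropy, spelled out with log_softmax and a one-hot label. The same arithmetic in NumPy for one example (logits are invented values):

    import numpy as np

    logits = np.array([2.0, 0.5, -1.0])
    label = 0
    log_probs = logits - np.log(np.exp(logits).sum())   # log_softmax
    per_example_loss = -log_probs[label]                # == -sum(one_hot * log_probs)
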
features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","num_labels",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
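In eval mode, metric_fn weights everything by is_real_example so that PaddingInputExamples, added only to fill the last TPU batch, cannot distort the metrics. The weighting in plain NumPy (values invented for the example):

    import numpy as np

    preds = np.array([1, 0, 2, 2])
    labels = np.array([1, 0, 1, 0])
    weights = np.array([1.0, 1.0, 1.0, 0.0])   # last row is a padding example
    accuracy = ((preds == labels) * weights).sum() / weights.sum()
    assert abs(accuracy - 2.0 / 3.0) < 1e-9
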
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_real_example","=","None","if","\"is_real_example\"","in","features",":","is_real_example","=","tf",".","cast","(","features","[","\"is_real_example\"","]",",","dtype","=","tf",".","float32",")","else",":","is_real_example","=","tf",".","ones","(","tf",".","shape","(","label_ids",")",",","dtype","=","tf",".","float32",")","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",",","is_real_example",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","label_ids",",","predictions","=","predictions",",","weights","=","is_real_example",")","loss","=","tf",".","metrics",".","mean","(","values","=","per_example_loss",",","weights","=","is_real_example",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits",",","is_real_example","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L546-L635"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",",","is_training",",","drop_remainder",")",":","all_input_ids","=","[","]","all_input_mask","=","[","]","all_segment_ids","=","[","]","all_label_ids","=","[","]","for","feature","in","features",":","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_segment_ids",".","append","(","feature",".","segment_ids",")","all_label_ids",".","append","(","feature",".","label_id",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"segment_ids\"",":","tf",".","constant","(","all_segment_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"label_ids\"",":","tf",".","constant","(","all_label_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","}",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L640-L689"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return features","docstring":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_summary":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","list","of","InputFeatures","."],"function":"def convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 
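input_fn_builder's in-memory variant above trades scalability for simplicity: every example is materialized as a constant tensor, which works for a demo-sized prediction set but not for real training data. A toy-sized equivalent (shapes and ids illustrative):

    import tensorflow as tf

    d = tf.data.Dataset.from_tensor_slices({
        "input_ids": tf.constant([[101, 102], [101, 102]], dtype=tf.int32),
        "segment_ids": tf.constant([[0, 0], [0, 0]], dtype=tf.int32),
        "label_ids": tf.constant([0, 1], dtype=tf.int32),
    })
    d = d.batch(2, drop_remainder=True)
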
== 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","features",".","append","(","feature",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier.py#L694-L707"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/optimization.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
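The warmup/decay combination above reduces to a piecewise-linear function of the global step: linear warmup to init_lr, then linear (power=1) decay to 0. A pure-Python restatement under exactly those settings:

    def lr_at(step, init_lr, num_train_steps, num_warmup_steps):
        if step < num_warmup_steps:
            return init_lr * float(step) / num_warmup_steps      # linear warmup
        frac = min(step, num_train_steps) / float(num_train_steps)
        return init_lr * (1.0 - frac)                            # linear decay to 0

    assert lr_at(50, 1e-4, 1000, 100) == 5e-5     # halfway through warmup
    assert lr_at(1000, 1e-4, 1000, 100) == 0.0    # fully decayed
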
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","AdamWeightDecayOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/optimization.py#L25-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs an AdamWeightDecayOptimizer.","docstring_summary":"Constructs an AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","an","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs an AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/optimization.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, 
v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want to decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
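The apply_gradients record above is the core of AdamWeightDecayOptimizer: the weight decay is applied directly to the parameters, outside the Adam moment estimates, rather than being folded into the loss as an L2 term. A minimal NumPy sketch of one such decoupled update step, mirroring the m/v recurrences in the record (the function name, array values, and hyperparameters here are illustrative, not taken from the repository):

import numpy as np

def adamw_step(param, grad, m, v, lr=1e-4, beta_1=0.9, beta_2=0.999,
               eps=1e-6, weight_decay=0.01):
    """One decoupled-weight-decay Adam step (a sketch of the rule above)."""
    m = beta_1 * m + (1.0 - beta_1) * grad        # first-moment estimate
    v = beta_2 * v + (1.0 - beta_2) * grad ** 2   # second-moment estimate
    update = m / (np.sqrt(v) + eps)
    update += weight_decay * param                # decay applied outside the loss
    return param - lr * update, m, v

param, m, v = np.ones(3), np.zeros(3), np.zeros(3)
param, m, v = adamw_step(param, np.array([0.1, -0.2, 0.3]), m, v)
print(param)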
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/optimization.py#L108-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/optimization.py#L159-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/optimization.py#L169-L174"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/create_pretraining_data.py","language":"python","identifier":"write_instance_to_example_files","parameters":"(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, 
output_files)","argument_list":"","return_statement":"","docstring":"Create TF example files from `TrainingInstance`s.","docstring_summary":"Create TF example files from `TrainingInstance`s.","docstring_tokens":["Create","TF","example","files","from","TrainingInstance","s","."],"function":"def write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", 
total_written)","function_tokens":["def","write_instance_to_example_files","(","instances",",","tokenizer",",","max_seq_length",",","max_predictions_per_seq",",","output_files",")",":","writers","=","[","]","for","output_file","in","output_files",":","writers",".","append","(","tf",".","python_io",".","TFRecordWriter","(","output_file",")",")","writer_index","=","0","total_written","=","0","for","(","inst_index",",","instance",")","in","enumerate","(","instances",")",":","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","tokens",")","input_mask","=","[","1","]","*","len","(","input_ids",")","segment_ids","=","list","(","instance",".","segment_ids",")","assert","len","(","input_ids",")","<=","max_seq_length","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","masked_lm_positions","=","list","(","instance",".","masked_lm_positions",")","masked_lm_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","masked_lm_labels",")","masked_lm_weights","=","[","1.0","]","*","len","(","masked_lm_ids",")","while","len","(","masked_lm_positions",")","<","max_predictions_per_seq",":","masked_lm_positions",".","append","(","0",")","masked_lm_ids",".","append","(","0",")","masked_lm_weights",".","append","(","0.0",")","next_sentence_label","=","1","if","instance",".","is_random_next","else","0","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","segment_ids",")","features","[","\"masked_lm_positions\"","]","=","create_int_feature","(","masked_lm_positions",")","features","[","\"masked_lm_ids\"","]","=","create_int_feature","(","masked_lm_ids",")","features","[","\"masked_lm_weights\"","]","=","create_float_feature","(","masked_lm_weights",")","features","[","\"next_sentence_labels\"","]","=","create_int_feature","(","[","next_sentence_label","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writers","[","writer_index","]",".","write","(","tf_example",".","SerializeToString","(",")",")","writer_index","=","(","writer_index","+","1",")","%","len","(","writers",")","total_written","+=","1","if","inst_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","instance",".","tokens","]",")",")","for","feature_name","in","features",".","keys","(",")",":","feature","=","features","[","feature_name","]","values","=","[","]","if","feature",".","int64_list",".","value",":","values","=","feature",".","int64_list",".","value","elif","feature",".","float_list",".","value",":","values","=","feature",".","float_list",".","value","tf",".","logging",".","info","(","\"%s: %s\"","%","(","feature_name",",","\" \"",".","join","(","[","str","(","x",")","for","x","in","values","]",")",")",")","for","writer","in","writers",":","writer",".","close","(",")","tf",".","logging",".","info","(","\"Wrote %d total 
instances\"",",","total_written",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/create_pretraining_data.py#L96-L166"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/create_pretraining_data.py","language":"python","identifier":"create_training_instances","parameters":"(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng)","argument_list":"","return_statement":"return instances","docstring":"Create `TrainingInstance`s from raw text.","docstring_summary":"Create `TrainingInstance`s from raw text.","docstring_tokens":["Create","TrainingInstance","s","from","raw","text","."],"function":"def create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng))\n\n rng.shuffle(instances)\n return instances","function_tokens":["def","create_training_instances","(","input_files",",","tokenizer",",","max_seq_length",",","dupe_factor",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","rng",")",":","all_documents","=","[","[","]","]","# Input file format:","# (1) One sentence per line. These should ideally be actual sentences, not","# entire paragraphs or arbitrary spans of text. (Because we use the","# sentence boundaries for the \"next sentence prediction\" task).","# (2) Blank lines between documents. 
Document boundaries are needed so","# that the \"next sentence prediction\" task doesn't span between documents.","for","input_file","in","input_files",":","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","# Empty lines are used as document delimiters","if","not","line",":","all_documents",".","append","(","[","]",")","tokens","=","tokenizer",".","tokenize","(","line",")","if","tokens",":","all_documents","[","-","1","]",".","append","(","tokens",")","# Remove empty documents","all_documents","=","[","x","for","x","in","all_documents","if","x","]","rng",".","shuffle","(","all_documents",")","vocab_words","=","list","(","tokenizer",".","vocab",".","keys","(",")",")","instances","=","[","]","for","_","in","range","(","dupe_factor",")",":","for","document_index","in","range","(","len","(","all_documents",")",")",":","instances",".","extend","(","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",")","rng",".","shuffle","(","instances",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/create_pretraining_data.py#L179-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document","parameters":"(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. 
Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document):\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances","function_tokens":["def","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. 
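The pairing logic in the create_instances_from_document record above makes two random choices per chunk: how many leading sentences form segment A (`a_end`), and whether segment B is the true continuation or text drawn from another document. A toy sketch of just those decisions, using Python's random.Random in place of the rng the repository threads through (the chunk contents and seed are illustrative):

import random

rng = random.Random(12345)
current_chunk = [["sent", "one"], ["sent", "two"], ["sent", "three"]]

# `a_end` is how many sentences from the chunk go into segment A.
a_end = rng.randint(1, len(current_chunk) - 1) if len(current_chunk) >= 2 else 1
tokens_a = [t for sent in current_chunk[:a_end] for t in sent]

# 50% of the time (or always, for a single-sentence chunk) B is random text.
is_random_next = len(current_chunk) == 1 or rng.random() < 0.5
print(a_end, is_random_next, tokens_a)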
However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","instances","=","[","]","current_chunk","=","[","]","current_length","=","0","i","=","0","while","i","<","len","(","document",")",":","segment","=","document","[","i","]","current_chunk",".","append","(","segment",")","current_length","+=","len","(","segment",")","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","if","current_chunk",":","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","tokens_b","=","[","]","# Random next","is_random_next","=","False","if","len","(","current_chunk",")","==","1","or","rng",".","random","(",")","<","0.5",":","is_random_next","=","True","target_b_length","=","target_seq_length","-","len","(","tokens_a",")","# This should rarely go for more than one iteration for large","# corpora. 
However, just to be careful, we try to make sure that","# the random document is not the same as the document","# we're processing.","for","_","in","range","(","10",")",":","random_document_index","=","rng",".","randint","(","0",",","len","(","all_documents",")","-","1",")","if","random_document_index","!=","document_index",":","break","random_document","=","all_documents","[","random_document_index","]","random_start","=","rng",".","randint","(","0",",","len","(","random_document",")","-","1",")","for","j","in","range","(","random_start",",","len","(","random_document",")",")",":","tokens_b",".","extend","(","random_document","[","j","]",")","if","len","(","tokens_b",")",">=","target_b_length",":","break","# We didn't actually use these segments so we \"put them back\" so","# they don't go to waste.","num_unused_segments","=","len","(","current_chunk",")","-","a_end","i","-=","num_unused_segments","# Actual next","else",":","is_random_next","=","False","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","current_length","=","0","i","+=","1","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/create_pretraining_data.py#L223-L335"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/create_pretraining_data.py","language":"python","identifier":"create_masked_lm_predictions","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original word. 
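Whole Word Masking, as the comment in the create_masked_lm_predictions record explains, only changes which indexes get masked together: "##" continuation pieces are grouped with the preceding wordpiece. A minimal sketch of that grouping, assuming whole-word masking is enabled (the sample tokens are invented):

tokens = ["[CLS]", "play", "##ing", "chess", "[SEP]"]
cand_indexes = []
for i, token in enumerate(tokens):
    if token in ("[CLS]", "[SEP]"):
        continue
    # A "##" continuation piece joins the previous word's index set.
    if cand_indexes and token.startswith("##"):
        cand_indexes[-1].append(i)
    else:
        cand_indexes.append([i])
print(cand_indexes)  # [[1, 2], [3]] -> "playing" is one masking candidate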
When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequent\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. When a word has been split into","# WordPieces, the first token does not have any marker and any subsequent","# tokens are prefixed with ##. 
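The masking policy in the function above is the standard BERT recipe: 80% of chosen positions become [MASK], 10% keep the original token, and 10% get a random vocabulary word. A sketch of that branch in isolation (the vocabulary, token, and seed are illustrative):

import random

rng = random.Random(1)
vocab_words = ["the", "dog", "ran", "chess"]
original = "dog"

if rng.random() < 0.8:
    masked_token = "[MASK]"          # 80%: replace with [MASK]
elif rng.random() < 0.5:
    masked_token = original          # 10%: keep the original token
else:
    # 10%: replace with a uniformly random vocabulary word
    masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
print(masked_token)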
So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","output_tokens","=","list","(","tokens",")","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","masked_token","=","tokens","[","index","]","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/create_pretraining_data.py#L342-L415"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/create_pretraining_data.py","language":"python","identifier":"truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_num_tokens, rng)","argument_list":"","return_statement":"","docstring":"Truncates a pair of sequences to a maximum sequence length.","docstring_summary":"Truncates a pair of sequences to a maximum sequence length.","docstring_tokens":["Truncates","a","pair","of","sequences","to","a","maximum","sequence","length","."],"function":"def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert 
len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()","function_tokens":["def","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")",":","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_num_tokens",":","break","trunc_tokens","=","tokens_a","if","len","(","tokens_a",")",">","len","(","tokens_b",")","else","tokens_b","assert","len","(","trunc_tokens",")",">=","1","# We want to sometimes truncate from the front and sometimes from the","# back to add more randomness and avoid biases.","if","rng",".","random","(",")","<","0.5",":","del","trunc_tokens","[","0","]","else",":","trunc_tokens",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/create_pretraining_data.py#L418-L433"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_ner.py","language":"python","identifier":"InputExample.__init__","parameters":"(self, guid, text, label=None)","argument_list":"","return_statement":"","docstring":"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.","docstring_summary":"Constructs a InputExample.","docstring_tokens":["Constructs","a","InputExample","."],"function":"def __init__(self, guid, text, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.label = label","function_tokens":["def","__init__","(","self",",","guid",",","text",",","label","=","None",")",":","self",".","guid","=","guid","self",".","text","=","text","self",".","label","=","label"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_ner.py#L123-L135"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_ner.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_ner.py#L152-L154"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_ner.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_ner.py#L156-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_ner.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_ner.py#L160-L162"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_ner.py","language":"python","identifier":"DataProcessor._read_data","parameters":"(cls, input_file)","argument_list":"","return_statement":"","docstring":"Reads a BIO data.","docstring_summary":"Reads a BIO data.","docstring_tokens":["Reads","a","BIO","data","."],"function":"def 
_read_data(cls, input_file):\n \"\"\"Reads a BIO data.\"\"\"\n with open(input_file) as f:\n lines = []\n words = []\n labels = []\n for line in f:\n contends = line.strip()\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n if contends.startswith(\"-DOCSTART-\"):\n words.append('')\n continue\n if len(contends) == 0 and words[-1] == '.':\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append([l, w])\n words = []\n labels = []\n continue\n if len(contends) == 0:\n continue\n words.append(word)\n labels.append(label)\n return lines","function_tokens":["def","_read_data","(","cls",",","input_file",")",":","with","open","(","input_file",")","as","f",":","lines","=","[","]","words","=","[","]","labels","=","[","]","for","line","in","f",":","contends","=","line",".","strip","(",")","word","=","line",".","strip","(",")",".","split","(","' '",")","[","0","]","label","=","line",".","strip","(",")",".","split","(","' '",")","[","-","1","]","if","contends",".","startswith","(","\"-DOCSTART-\"",")",":","words",".","append","(","''",")","continue","if","len","(","contends",")","==","0","and","words","[","-","1","]","==","'.'",":","l","=","' '",".","join","(","[","label","for","label","in","labels","if","len","(","label",")",">","0","]",")","w","=","' '",".","join","(","[","word","for","word","in","words","if","len","(","word",")",">","0","]",")","lines",".","append","(","[","l",",","w","]",")","words","=","[","]","labels","=","[","]","continue","if","len","(","contends",")","==","0",":","continue","words",".","append","(","word",")","labels",".","append","(","label",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_ner.py#L165-L189"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/extract_features.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
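The input_fn_builder record above holds every feature in memory and feeds it through Dataset.from_tensor_slices, which, as its comment notes, is demo-scale only. A trimmed sketch of the same pattern, with illustrative shapes and batch size; it is written against the tf.data API (present in both TF1 and TF2), and the print loop assumes TF2 eager execution:

import tensorflow as tf

num_examples, seq_length, batch_size = 4, 8, 2
features = {
    "unique_ids": tf.constant(list(range(num_examples)), dtype=tf.int32),
    "input_ids": tf.constant([[0] * seq_length] * num_examples, dtype=tf.int32),
}
# One dict of full tensors is sliced into per-example dicts, then batched.
dataset = tf.data.Dataset.from_tensor_slices(features).batch(batch_size)
for batch in dataset:
    print(batch["unique_ids"])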
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(\n all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",")",":","all_unique_ids","=","[","]","all_input_ids","=","[","]","all_input_mask","=","[","]","all_input_type_ids","=","[","]","for","feature","in","features",":","all_unique_ids",".","append","(","feature",".","unique_id",")","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_input_type_ids",".","append","(","feature",".","input_type_ids",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"unique_ids\"",":","tf",".","constant","(","all_unique_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_type_ids\"",":","tf",".","constant","(","all_input_type_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","}",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","False",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/extract_features.py#L100-L145"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/extract_features.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(\n 
config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map,\n initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\n \"unique_id\": unique_ids,\n }\n\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","layer_indexes",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","input_type_ids","=","features","[","\"input_type_ids\"","]","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","False",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","input_type_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","if","mode","!=","tf",".","estimator",".","ModeKeys",".","PREDICT",":","raise","ValueError","(","\"Only PREDICT modes are supported: %s\"","%","(","mode",")",")","tvars","=","tf",".","trainable_variables","(",")","scaffold_fn","=","None","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","all_layers","=","model",".","get_all_encoder_layers","(",")","predictions","=","{","\"unique_id\"",":","unique_ids",",","}","for","(","i",",","layer_index",")","in","enumerate","(","layer_indexes",")",":","predictions","[","\"layer_output_%d\"","%","i","]","=","all_layers","[","layer_index","]","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/extract_features.py#L148-L207"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/extract_features.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, seq_length, tokenizer)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(\n InputFeatures(\n unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","input_type_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","input_type_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
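The feature assembly in the record above follows the BERT layout: [CLS], segment A tokens, [SEP] with type id 0; segment B tokens and a second [SEP] with type id 1; then a 1/0 attention mask zero-padded to the fixed sequence length. A pure-Python sketch of that layout (the helper name, sample tokens, and "[PAD]" placeholder are invented; the repository pads integer ids, not token strings):

def build_features(tokens_a, tokens_b, seq_length):
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    type_ids = [0] * len(tokens)
    if tokens_b:
        tokens += tokens_b + ["[SEP]"]
        type_ids += [1] * (len(tokens_b) + 1)
    input_mask = [1] * len(tokens)      # 1 = real token, 0 = padding
    while len(tokens) < seq_length:     # zero-pad up to the fixed length
        tokens.append("[PAD]")
        input_mask.append(0)
        type_ids.append(0)
    return tokens, input_mask, type_ids

print(build_features(["hello"], ["world"], 8))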
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","input_type_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","seq_length","assert","len","(","input_mask",")","==","seq_length","assert","len","(","input_type_ids",")","==","seq_length","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","example",".","unique_id",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"input_type_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_type_ids","]",")",")","features",".","append","(","InputFeatures","(","unique_id","=","example",".","unique_id",",","tokens","=","tokens",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","input_type_ids","=","input_type_ids",")",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/extract_features.py#L210-L299"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/extract_features.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. 
This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/extract_features.py#L302-L316"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/extract_features.py","language":"python","identifier":"read_examples","parameters":"(input_file)","argument_list":"","return_statement":"return examples","docstring":"Read a list of `InputExample`s from an input file.","docstring_summary":"Read a list of `InputExample`s from an input file.","docstring_tokens":["Read","a","list","of","InputExample","s","from","an","input","file","."],"function":"def read_examples(input_file):\n \"\"\"Read a list of `InputExample`s from an input file.\"\"\"\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(\n InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))\n unique_id += 1\n return examples","function_tokens":["def","read_examples","(","input_file",")",":","examples","=","[","]","unique_id","=","0","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","text_a","=","None","text_b","=","None","m","=","re",".","match","(","r\"^(.*) \\|\\|\\| (.*)$\"",",","line",")","if","m","is","None",":","text_a","=","line","else",":","text_a","=","m",".","group","(","1",")","text_b","=","m",".","group","(","2",")","examples",".","append","(","InputExample","(","unique_id","=","unique_id",",","text_a","=","text_a",",","text_b","=","text_b",")",")","unique_id","+=","1","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/extract_features.py#L319-L340"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier_with_tfhub.py","language":"python","identifier":"create_model","parameters":"(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle):\n \"\"\"Creates a classification model.\"\"\"\n tags = set()\n if is_training:\n tags.add(\"train\")\n bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True)\n bert_inputs = dict(\n input_ids=input_ids,\n 
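Note: _truncate_seq_pair, indexed above, always trims whichever side is currently longer; a standalone restatement with a toy worked example (the token strings are made up):

def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    # Pop from whichever list is currently longer until the pair fits.
    while len(tokens_a) + len(tokens_b) > max_length:
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()

a = ["is", "this", "jack", "##son", "##ville", "?"]
b = ["no", "it", "is", "not", "."]
_truncate_seq_pair(a, b, 8)   # e.g. seq_length 11 minus 3 for [CLS]/[SEP]/[SEP]
# a -> ['is', 'this', 'jack', '##son']  (lost two tokens: it started longer)
# b -> ['no', 'it', 'is', 'not']        (lost one once the lengths tied)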
input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use\n # bert_outputs[\"sequence_output\"] instead.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)","function_tokens":["def","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","bert_hub_module_handle",")",":","tags","=","set","(",")","if","is_training",":","tags",".","add","(","\"train\"",")","bert_module","=","hub",".","Module","(","bert_hub_module_handle",",","tags","=","tags",",","trainable","=","True",")","bert_inputs","=","dict","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",")","bert_outputs","=","bert_module","(","inputs","=","bert_inputs",",","signature","=","\"tokens\"",",","as_dict","=","True",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use","# bert_outputs[\"sequence_output\"] instead.","output_layer","=","bert_outputs","[","\"pooled_output\"","]","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 
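Note: the classification head in create_model above is pooled_output -> dropout -> affine -> softmax cross-entropy. A framework-free numpy sketch of the same arithmetic (dropout omitted; classifier_head_loss and all shapes/values are illustrative, not the repo's API):

import numpy as np

def classifier_head_loss(pooled, weights, bias, labels):
    # logits = pooled @ W^T + b, matching tf.matmul(..., transpose_b=True).
    logits = pooled @ weights.T + bias
    # Numerically stable log-softmax over the label dimension.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    one_hot = np.eye(weights.shape[0])[labels]
    per_example_loss = -(one_hot * log_probs).sum(axis=-1)
    return per_example_loss.mean()

rng = np.random.default_rng(0)
loss = classifier_head_loss(
    pooled=rng.normal(size=(4, 8)),          # batch of 4 pooled vectors
    weights=rng.normal(size=(3, 8)) * 0.02,  # num_labels x hidden_size
    bias=np.zeros(3),
    labels=np.array([0, 2, 1, 2]))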
dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier_with_tfhub.py#L37-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier_with_tfhub.py","language":"python","identifier":"model_fn_builder","parameters":"(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,\n bert_hub_module_handle)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions={\"probabilities\": probabilities})\n else:\n raise ValueError(\n \"Only TRAIN, EVAL and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return 
model_fn","function_tokens":["def","model_fn_builder","(","num_labels",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","bert_hub_module_handle",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","bert_hub_module_handle",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","label_ids",",","predictions",")","loss","=","tf",".","metrics",".","mean","(","per_example_loss",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",")","else",":","raise","ValueError","(","\"Only TRAIN, EVAL and PREDICT modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier_with_tfhub.py#L87-L143"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/ernie\/run_classifier_with_tfhub.py","language":"python","identifier":"create_tokenizer_from_hub_module","parameters":"(bert_hub_module_handle)","argument_list":"","return_statement":"return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","docstring":"Get the vocab file and casing info from the Hub module.","docstring_summary":"Get the vocab file and casing info from the Hub module.","docstring_tokens":["Get","the","vocab","file","and","casing","info","from","the","Hub","module","."],"function":"def 
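Note: the model_fn above follows the usual Estimator pattern: build the graph once, then branch on mode to pick what to return, rejecting unknown modes. A toy stand-in showing just the dispatch (plain Python placeholders, not the TF API):

TRAIN, EVAL, PREDICT = "train", "eval", "predict"

def model_fn(features, labels, mode):
    # Stand-ins for the real graph outputs.
    loss, probabilities = 0.5, [0.1, 0.9]
    if mode == TRAIN:
        return {"mode": mode, "loss": loss, "train_op": "minimize(loss)"}
    if mode == EVAL:
        return {"mode": mode, "loss": loss, "metrics": {"eval_accuracy": 1.0}}
    if mode == PREDICT:
        return {"mode": mode, "predictions": {"probabilities": probabilities}}
    raise ValueError("Only TRAIN, EVAL and PREDICT modes are supported: %s" % mode)

spec = model_fn({}, None, PREDICT)   # -> {'mode': 'predict', 'predictions': ...}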
create_tokenizer_from_hub_module(bert_hub_module_handle):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_hub_module_handle)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","function_tokens":["def","create_tokenizer_from_hub_module","(","bert_hub_module_handle",")",":","with","tf",".","Graph","(",")",".","as_default","(",")",":","bert_module","=","hub",".","Module","(","bert_hub_module_handle",")","tokenization_info","=","bert_module","(","signature","=","\"tokenization_info\"",",","as_dict","=","True",")","with","tf",".","Session","(",")","as","sess",":","vocab_file",",","do_lower_case","=","sess",".","run","(","[","tokenization_info","[","\"vocab_file\"","]",",","tokenization_info","[","\"do_lower_case\"","]","]",")","return","tokenization",".","FullTokenizer","(","vocab_file","=","vocab_file",",","do_lower_case","=","do_lower_case",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/ernie\/run_classifier_with_tfhub.py#L146-L155"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = 
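Note: create_tokenizer_from_hub_module, indexed above, spins up a throwaway graph and session only to read two constants (vocab path and casing flag) out of the module before constructing the tokenizer. The same fetch-assets-then-construct shape with a hypothetical stand-in (FakeHubModule and its method are invented for illustration):

class FakeHubModule:
    # Invented stand-in: the real hub.Module is called with
    # signature="tokenization_info" inside a temporary graph/session.
    def tokenization_info(self):
        return {"vocab_file": "/tmp/vocab.txt", "do_lower_case": True}

def tokenizer_config(module):
    info = module.tokenization_info()   # the real code fetches this via sess.run
    return info["vocab_file"], info["do_lower_case"]

vocab_file, do_lower_case = tokenizer_config(FakeHubModule())
# -> ('/tmp/vocab.txt', True): the two values FullTokenizer needs.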
tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features 
***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","masked_lm_positions","=","features","[","\"masked_lm_positions\"","]","masked_lm_ids","=","features","[","\"masked_lm_ids\"","]","masked_lm_weights","=","features","[","\"masked_lm_weights\"","]","next_sentence_labels","=","features","[","\"next_sentence_labels\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","(","masked_lm_loss",",","masked_lm_example_loss",",","masked_lm_log_probs",")","=","get_masked_lm_output","(","bert_config",",","model",".","get_sequence_output","(",")",",","model",".","get_embedding_table","(",")",",","masked_lm_positions",",","masked_lm_ids",",","masked_lm_weights",")","(","next_sentence_loss",",","next_sentence_example_loss",",","next_sentence_log_probs",")","=","get_next_sentence_output","(","bert_config",",","model",".","get_pooled_output","(",")",",","next_sentence_labels",")","total_loss","=","masked_lm_loss","+","next_sentence_loss","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels",")",":","\"\"\"Computes the loss and accuracy of the 
model.\"\"\"","masked_lm_log_probs","=","tf",".","reshape","(","masked_lm_log_probs",",","[","-","1",",","masked_lm_log_probs",".","shape","[","-","1","]","]",")","masked_lm_predictions","=","tf",".","argmax","(","masked_lm_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","masked_lm_example_loss","=","tf",".","reshape","(","masked_lm_example_loss",",","[","-","1","]",")","masked_lm_ids","=","tf",".","reshape","(","masked_lm_ids",",","[","-","1","]",")","masked_lm_weights","=","tf",".","reshape","(","masked_lm_weights",",","[","-","1","]",")","masked_lm_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","masked_lm_ids",",","predictions","=","masked_lm_predictions",",","weights","=","masked_lm_weights",")","masked_lm_mean_loss","=","tf",".","metrics",".","mean","(","values","=","masked_lm_example_loss",",","weights","=","masked_lm_weights",")","next_sentence_log_probs","=","tf",".","reshape","(","next_sentence_log_probs",",","[","-","1",",","next_sentence_log_probs",".","shape","[","-","1","]","]",")","next_sentence_predictions","=","tf",".","argmax","(","next_sentence_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","next_sentence_labels","=","tf",".","reshape","(","next_sentence_labels",",","[","-","1","]",")","next_sentence_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","next_sentence_labels",",","predictions","=","next_sentence_predictions",")","next_sentence_mean_loss","=","tf",".","metrics",".","mean","(","values","=","next_sentence_example_loss",")","return","{","\"masked_lm_accuracy\"",":","masked_lm_accuracy",",","\"masked_lm_loss\"",":","masked_lm_mean_loss",",","\"next_sentence_accuracy\"",":","next_sentence_accuracy",",","\"next_sentence_loss\"",":","next_sentence_mean_loss",",","}","eval_metrics","=","(","metric_fn",",","[","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and EVAL modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py#L109-L237"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py","language":"python","identifier":"get_masked_lm_output","parameters":"(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights)","argument_list":"","return_statement":"return (loss, per_example_loss, log_probs)","docstring":"Get loss and log probs for the masked LM.","docstring_summary":"Get loss and log probs for the masked LM.","docstring_tokens":["Get","loss","and","log","probs","for","the","masked","LM","."],"function":"def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls\/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after 
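Note: the eval metric_fn above reduces to weighted accuracy over flattened predictions, with label_weights zeroing out padded prediction slots. A numpy sketch of the masked-LM accuracy part (toy probabilities, not from the corpus):

import numpy as np

def masked_lm_accuracy(log_probs, label_ids, label_weights):
    # Flatten to (num_predictions, vocab), take the argmax per position,
    # then average correctness under the 1.0/0.0 padding weights.
    log_probs = log_probs.reshape(-1, log_probs.shape[-1])
    predictions = log_probs.argmax(axis=-1)
    correct = (predictions == label_ids.reshape(-1)).astype(float)
    weights = label_weights.reshape(-1)
    return (correct * weights).sum() / weights.sum()

log_probs = np.log(np.array([[[0.7, 0.2, 0.1],
                              [0.1, 0.8, 0.1],
                              [0.3, 0.3, 0.4]]]))
acc = masked_lm_accuracy(log_probs, np.array([[0, 1, 2]]), np.array([[1.0, 1.0, 0.0]]))
# acc == 1.0: both real predictions are right; the padded slot is ignored.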
pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator \/ denominator\n\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_masked_lm_output","(","bert_config",",","input_tensor",",","output_weights",",","positions",",","label_ids",",","label_weights",")",":","input_tensor","=","gather_indexes","(","input_tensor",",","positions",")","with","tf",".","variable_scope","(","\"cls\/predictions\"",")",":","# We apply one more non-linear transformation before the output layer.","# This matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"transform\"",")",":","input_tensor","=","tf",".","layers",".","dense","(","input_tensor",",","units","=","bert_config",".","hidden_size",",","activation","=","modeling",".","get_activation","(","bert_config",".","hidden_act",")",",","kernel_initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","input_tensor","=","modeling",".","layer_norm","(","input_tensor",")","# The output weights are the same as the input embeddings, but there is","# an output-only bias for each token.","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","bert_config",".","vocab_size","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","label_ids","=","tf",".","reshape","(","label_ids",",","[","-","1","]",")","label_weights","=","tf",".","reshape","(","label_weights",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","label_ids",",","depth","=","bert_config",".","vocab_size",",","dtype","=","tf",".","float32",")","# The `positions` tensor might be zero-padded (if the sequence is too","# short to have the maximum number of predictions). 
The `label_weights`","# tensor has a value of 1.0 for every real prediction and 0.0 for the","# padding predictions.","per_example_loss","=","-","tf",".","reduce_sum","(","log_probs","*","one_hot_labels",",","axis","=","[","-","1","]",")","numerator","=","tf",".","reduce_sum","(","label_weights","*","per_example_loss",")","denominator","=","tf",".","reduce_sum","(","label_weights",")","+","1e-5","loss","=","numerator","\/","denominator","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py#L240-L282"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py","language":"python","identifier":"get_next_sentence_output","parameters":"(bert_config, input_tensor, labels)","argument_list":"","return_statement":"","docstring":"Get loss and log probs for the next sentence prediction.","docstring_summary":"Get loss and log probs for the next sentence prediction.","docstring_tokens":["Get","loss","and","log","probs","for","the","next","sentence","prediction","."],"function":"def get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls\/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_next_sentence_output","(","bert_config",",","input_tensor",",","labels",")",":","# Simple binary classification. Note that 0 is \"next sentence\" and 1 is","# \"random sentence\". 
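Note: the masked-LM loss above is a weighted mean: per-position cross-entropy summed under the 1.0/0.0 label_weights, divided by the weight total plus 1e-5 so an all-padding batch cannot divide by zero. The same computation in numpy (toy numbers):

import numpy as np

def masked_lm_loss(log_probs, one_hot_labels, label_weights):
    per_example_loss = -(log_probs * one_hot_labels).sum(axis=-1)
    numerator = (label_weights * per_example_loss).sum()
    denominator = label_weights.sum() + 1e-5
    return numerator / denominator

log_probs = np.log(np.array([[0.9, 0.1], [0.5, 0.5], [0.5, 0.5]]))
one_hot = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0]])
weights = np.array([1.0, 1.0, 0.0])   # third position is padding
loss = masked_lm_loss(log_probs, one_hot, weights)
# ~ (-log 0.9 - log 0.5) / 2; the padded position contributes nothing.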
This weight matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"cls\/seq_relationship\"",")",":","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","shape","=","[","2",",","bert_config",".","hidden_size","]",",","initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","labels","=","tf",".","reshape","(","labels",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","2",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py#L285-L305"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py","language":"python","identifier":"gather_indexes","parameters":"(sequence_tensor, positions)","argument_list":"","return_statement":"return output_tensor","docstring":"Gathers the vectors at the specific positions over a minibatch.","docstring_summary":"Gathers the vectors at the specific positions over a minibatch.","docstring_tokens":["Gathers","the","vectors","at","the","specific","positions","over","a","minibatch","."],"function":"def gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return 
output_tensor","function_tokens":["def","gather_indexes","(","sequence_tensor",",","positions",")",":","sequence_shape","=","modeling",".","get_shape_list","(","sequence_tensor",",","expected_rank","=","3",")","batch_size","=","sequence_shape","[","0","]","seq_length","=","sequence_shape","[","1","]","width","=","sequence_shape","[","2","]","flat_offsets","=","tf",".","reshape","(","tf",".","range","(","0",",","batch_size",",","dtype","=","tf",".","int32",")","*","seq_length",",","[","-","1",",","1","]",")","flat_positions","=","tf",".","reshape","(","positions","+","flat_offsets",",","[","-","1","]",")","flat_sequence_tensor","=","tf",".","reshape","(","sequence_tensor",",","[","batch_size","*","seq_length",",","width","]",")","output_tensor","=","tf",".","gather","(","flat_sequence_tensor",",","flat_positions",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py#L308-L321"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py","language":"python","identifier":"input_fn_builder","parameters":"(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
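Note: gather_indexes, indexed above, flattens (batch, seq, width) to (batch*seq, width) and turns per-example positions into flat row indices by adding a per-example offset of example_index * seq_length. The identical trick in numpy:

import numpy as np

def gather_indexes(sequence_tensor, positions):
    batch_size, seq_length, width = sequence_tensor.shape
    flat_offsets = (np.arange(batch_size) * seq_length).reshape(-1, 1)
    flat_positions = (positions + flat_offsets).reshape(-1)
    flat_sequence = sequence_tensor.reshape(batch_size * seq_length, width)
    return flat_sequence[flat_positions]

seq = np.arange(2 * 4 * 3).reshape(2, 4, 3)      # batch 2, length 4, width 3
out = gather_indexes(seq, np.array([[0, 2], [1, 3]]))
# Rows 0 and 2 of example 0, rows 1 and 3 of example 1 -> shape (4, 3).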
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_files",",","max_seq_length",",","max_predictions_per_seq",",","is_training",",","num_cpu_threads","=","4",")",":","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"masked_lm_positions\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_ids\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_weights\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","float32",")",",","\"next_sentence_labels\"",":","tf",".","FixedLenFeature","(","[","1","]",",","tf",".","int64",")",",","}","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","if","is_training",":","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","tf",".","constant","(","input_files",")",")","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","len","(","input_files",")",")","# `cycle_length` is the number of parallel files that get read.","cycle_length","=","min","(","num_cpu_threads",",","len","(","input_files",")",")","# `sloppy` mode means that the interleaving is not exact. This adds","# even more randomness to the training pipeline.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","parallel_interleave","(","tf",".","data",".","TFRecordDataset",",","sloppy","=","is_training",",","cycle_length","=","cycle_length",")",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","else",":","d","=","tf",".","data",".","TFRecordDataset","(","input_files",")","# Since we evaluate for a fixed number of steps we don't want to encounter","# out-of-range exceptions.","d","=","d",".","repeat","(",")","# We must `drop_remainder` on training because the TPU requires fixed","# size dimensions. 
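Note: the input_fn above gives training a repeated, shuffled, sloppily interleaved reader and eval a plain sequential (but still repeated) one; batching always drops the remainder because TPU shapes must be static. A generator sketch of just the drop_remainder semantics (plain Python, no tf.data):

def batches(records, batch_size, drop_remainder=True):
    batch = []
    for r in records:
        batch.append(r)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch and not drop_remainder:
        yield batch   # the partial tail, kept only when remainders are allowed

list(batches(range(7), 3))                        # [[0, 1, 2], [3, 4, 5]]; 6 is dropped
list(batches(range(7), 3, drop_remainder=False))  # adds the [6] tail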
For eval, we assume we are evaluating on the CPU or GPU","# and we *don't* want to drop the remainder, otherwise we wont cover","# every sample.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","num_parallel_batches","=","num_cpu_threads",",","drop_remainder","=","True",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py#L324-L388"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py","language":"python","identifier":"_decode_record","parameters":"(record, name_to_features)","argument_list":"","return_statement":"return example","docstring":"Decodes a record to a TensorFlow example.","docstring_summary":"Decodes a record to a TensorFlow example.","docstring_tokens":["Decodes","a","record","to","a","TensorFlow","example","."],"function":"def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example","function_tokens":["def","_decode_record","(","record",",","name_to_features",")",":","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_pretraining.py#L391-L403"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"read_squad_examples","parameters":"(input_file, is_training)","argument_list":"","return_statement":"return examples","docstring":"Read a SQuAD json file into a list of SquadExample.","docstring_summary":"Read a SQuAD json file into a list of SquadExample.","docstring_tokens":["Read","a","SQuAD","json","file","into","a","list","of","SquadExample","."],"function":"def read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n 
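Note: _decode_record, indexed above, is a one-pass dict rewrite: tf.Example can only store int64, while the TPU wants int32, so every int64 feature is cast down and everything else passes through. The same pass sketched with numpy arrays standing in for tensors:

import numpy as np

def decode_record(example):
    # Cast every int64 feature to int32; float features are left untouched.
    for name in list(example.keys()):
        t = example[name]
        if t.dtype == np.int64:
            example[name] = t.astype(np.int32)
    return example

ex = decode_record({"input_ids": np.array([101, 102], dtype=np.int64),
                    "masked_lm_weights": np.array([1.0, 0.0], dtype=np.float32)})
# ex["input_ids"].dtype -> int32; ex["masked_lm_weights"].dtype -> float32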
char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples","function_tokens":["def","read_squad_examples","(","input_file",",","is_training",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","reader",":","input_data","=","json",".","load","(","reader",")","[","\"data\"","]","def","is_whitespace","(","c",")",":","if","c","==","\" \"","or","c","==","\"\\t\"","or","c","==","\"\\r\"","or","c","==","\"\\n\"","or","ord","(","c",")","==","0x202F",":","return","True","return","False","examples","=","[","]","for","entry","in","input_data",":","for","paragraph","in","entry","[","\"paragraphs\"","]",":","paragraph_text","=","paragraph","[","\"context\"","]","doc_tokens","=","[","]","char_to_word_offset","=","[","]","prev_is_whitespace","=","True","for","c","in","paragraph_text",":","if","is_whitespace","(","c",")",":","prev_is_whitespace","=","True","else",":","if","prev_is_whitespace",":","doc_tokens",".","append","(","c",")","else",":","doc_tokens","[","-","1","]","+=","c","prev_is_whitespace","=","False","char_to_word_offset",".","append","(","len","(","doc_tokens",")","-","1",")","for","qa","in","paragraph","[","\"qas\"","]",":","qas_id","=","qa","[","\"id\"","]","question_text","=","qa","[","\"question\"","]","start_position","=","None","end_position","=","None","orig_answer_text","=","None","is_impossible","=","False","if","is_training",":","if","FLAGS",".","version_2_with_negative",":","is_impossible","=","qa","[","\"is_impossible\"","]","if","(","len","(","qa","[","\"answers\"","]",")","!=","1",")","and","(","not","is_impossible",")",":","raise","ValueError","(","\"For training, each question should have exactly 1 
answer.\"",")","if","not","is_impossible",":","answer","=","qa","[","\"answers\"","]","[","0","]","orig_answer_text","=","answer","[","\"text\"","]","answer_offset","=","answer","[","\"answer_start\"","]","answer_length","=","len","(","orig_answer_text",")","start_position","=","char_to_word_offset","[","answer_offset","]","end_position","=","char_to_word_offset","[","answer_offset","+","answer_length","-","1","]","# Only add answers where the text can be exactly recovered from the","# document. If this CAN'T happen it's likely due to weird Unicode","# stuff so we will just skip the example.","#","# Note that this means for training mode, every example is NOT","# guaranteed to be preserved.","actual_text","=","\" \"",".","join","(","doc_tokens","[","start_position",":","(","end_position","+","1",")","]",")","cleaned_answer_text","=","\" \"",".","join","(","tokenization",".","whitespace_tokenize","(","orig_answer_text",")",")","if","actual_text",".","find","(","cleaned_answer_text",")","==","-","1",":","tf",".","logging",".","warning","(","\"Could not find answer: '%s' vs. '%s'\"",",","actual_text",",","cleaned_answer_text",")","continue","else",":","start_position","=","-","1","end_position","=","-","1","orig_answer_text","=","\"\"","example","=","SquadExample","(","qas_id","=","qas_id",",","question_text","=","question_text",",","doc_tokens","=","doc_tokens",",","orig_answer_text","=","orig_answer_text",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","is_impossible",")","examples",".","append","(","example",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L227-L306"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn)","argument_list":"","return_statement":"","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 
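Note: the doc_tokens/char_to_word_offset construction in read_squad_examples above is what lets character-level answer_start offsets be converted into token positions later. A self-contained restatement of that loop with a toy paragraph:

def build_offsets(paragraph_text):
    doc_tokens, char_to_word_offset = [], []
    prev_is_whitespace = True
    for c in paragraph_text:
        if c in " \t\r\n" or ord(c) == 0x202F:
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)      # start a new token
            else:
                doc_tokens[-1] += c       # extend the current token
            prev_is_whitespace = False
        char_to_word_offset.append(len(doc_tokens) - 1)
    return doc_tokens, char_to_word_offset

tokens, offsets = build_offsets("the dog ran")
# tokens  -> ['the', 'dog', 'ran']
# offsets -> [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2]; answer_start 4 maps to token 1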
1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
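Note: the sliding-window chunking above reduces to a small loop: take up to max_tokens_for_doc tokens, then advance by min(length, doc_stride), so long documents become overlapping spans (make_doc_spans is a wrapper name for illustration):

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_doc_tokens, max_tokens_for_doc, doc_stride):
    doc_spans, start_offset = [], 0
    while start_offset < num_doc_tokens:
        length = min(num_doc_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == num_doc_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans

spans = make_doc_spans(num_doc_tokens=10, max_tokens_for_doc=4, doc_stride=3)
# -> [DocSpan(0, 4), DocSpan(3, 4), DocSpan(6, 4)]: overlapping windows that
# together cover all 10 tokens.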
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 
1","function_tokens":["def","convert_examples_to_features","(","examples",",","tokenizer",",","max_seq_length",",","doc_stride",",","max_query_length",",","is_training",",","output_fn",")",":","unique_id","=","1000000000","for","(","example_index",",","example",")","in","enumerate","(","examples",")",":","query_tokens","=","tokenizer",".","tokenize","(","example",".","question_text",")","if","len","(","query_tokens",")",">","max_query_length",":","query_tokens","=","query_tokens","[","0",":","max_query_length","]","tok_to_orig_index","=","[","]","orig_to_tok_index","=","[","]","all_doc_tokens","=","[","]","for","(","i",",","token",")","in","enumerate","(","example",".","doc_tokens",")",":","orig_to_tok_index",".","append","(","len","(","all_doc_tokens",")",")","sub_tokens","=","tokenizer",".","tokenize","(","token",")","for","sub_token","in","sub_tokens",":","tok_to_orig_index",".","append","(","i",")","all_doc_tokens",".","append","(","sub_token",")","tok_start_position","=","None","tok_end_position","=","None","if","is_training","and","example",".","is_impossible",":","tok_start_position","=","-","1","tok_end_position","=","-","1","if","is_training","and","not","example",".","is_impossible",":","tok_start_position","=","orig_to_tok_index","[","example",".","start_position","]","if","example",".","end_position","<","len","(","example",".","doc_tokens",")","-","1",":","tok_end_position","=","orig_to_tok_index","[","example",".","end_position","+","1","]","-","1","else",":","tok_end_position","=","len","(","all_doc_tokens",")","-","1","(","tok_start_position",",","tok_end_position",")","=","_improve_answer_span","(","all_doc_tokens",",","tok_start_position",",","tok_end_position",",","tokenizer",",","example",".","orig_answer_text",")","# The -3 accounts for [CLS], [SEP] and [SEP]","max_tokens_for_doc","=","max_seq_length","-","len","(","query_tokens",")","-","3","# We can have documents that are longer than the maximum sequence length.","# To deal with this we do a sliding window approach, where we take chunks","# of the up to our max length with a stride of `doc_stride`.","_DocSpan","=","collections",".","namedtuple","(","# pylint: 
disable=invalid-name","\"DocSpan\"",",","[","\"start\"",",","\"length\"","]",")","doc_spans","=","[","]","start_offset","=","0","while","start_offset","<","len","(","all_doc_tokens",")",":","length","=","len","(","all_doc_tokens",")","-","start_offset","if","length",">","max_tokens_for_doc",":","length","=","max_tokens_for_doc","doc_spans",".","append","(","_DocSpan","(","start","=","start_offset",",","length","=","length",")",")","if","start_offset","+","length","==","len","(","all_doc_tokens",")",":","break","start_offset","+=","min","(","length",",","doc_stride",")","for","(","doc_span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","tokens","=","[","]","token_to_orig_map","=","{","}","token_is_max_context","=","{","}","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","query_tokens",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","i","in","range","(","doc_span",".","length",")",":","split_token_index","=","doc_span",".","start","+","i","token_to_orig_map","[","len","(","tokens",")","]","=","tok_to_orig_index","[","split_token_index","]","is_max_context","=","_check_is_max_context","(","doc_spans",",","doc_span_index",",","split_token_index",")","token_is_max_context","[","len","(","tokens",")","]","=","is_max_context","tokens",".","append","(","all_doc_tokens","[","split_token_index","]",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
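The tokens above implement the sliding-window split: documents longer than `max_tokens_for_doc` are cut into overlapping `_DocSpan` windows advanced by `doc_stride`. A self-contained sketch with toy sizes (the real code derives them from the tokenizer output and the `--doc_stride`/`--max_seq_length` flags):

```python
import collections

# Stand-alone version of the sliding-window loop above, toy values only.
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

all_doc_tokens = ["tok%d" % i for i in range(10)]
max_tokens_for_doc = 4
doc_stride = 2

doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
    length = min(len(all_doc_tokens) - start_offset, max_tokens_for_doc)
    doc_spans.append(_DocSpan(start=start_offset, length=length))
    if start_offset + length == len(all_doc_tokens):
        break  # last window reaches the end of the document
    start_offset += min(length, doc_stride)

# doc_spans -> [DocSpan(0, 4), DocSpan(2, 4), DocSpan(4, 4), DocSpan(6, 4)]
```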
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","start_position","=","None","end_position","=","None","if","is_training","and","not","example",".","is_impossible",":","# For training, if our document chunk does not contain an annotation","# we throw it out, since there is nothing to predict.","doc_start","=","doc_span",".","start","doc_end","=","doc_span",".","start","+","doc_span",".","length","-","1","out_of_span","=","False","if","not","(","tok_start_position",">=","doc_start","and","tok_end_position","<=","doc_end",")",":","out_of_span","=","True","if","out_of_span",":","start_position","=","0","end_position","=","0","else",":","doc_offset","=","len","(","query_tokens",")","+","2","start_position","=","tok_start_position","-","doc_start","+","doc_offset","end_position","=","tok_end_position","-","doc_start","+","doc_offset","if","is_training","and","example",".","is_impossible",":","start_position","=","0","end_position","=","0","if","example_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","unique_id",")",")","tf",".","logging",".","info","(","\"example_index: %s\"","%","(","example_index",")",")","tf",".","logging",".","info","(","\"doc_span_index: %s\"","%","(","doc_span_index",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"token_to_orig_map: %s\"","%","\" \"",".","join","(","[","\"%d:%d\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_to_orig_map",")","]",")",")","tf",".","logging",".","info","(","\"token_is_max_context: %s\"","%","\" \"",".","join","(","[","\"%d:%s\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_is_max_context",")","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","if","is_training","and","example",".","is_impossible",":","tf",".","logging",".","info","(","\"impossible example\"",")","if","is_training","and","not","example",".","is_impossible",":","answer_text","=","\" \"",".","join","(","tokens","[","start_position",":","(","end_position","+","1",")","]",")","tf",".","logging",".","info","(","\"start_position: %d\"","%","(","start_position",")",")","tf",".","logging",".","info","(","\"end_position: %d\"","%","(","end_position",")",")","tf",".","logging",".","info","(","\"answer: 
%s\"","%","(","tokenization",".","printable_text","(","answer_text",")",")",")","feature","=","InputFeatures","(","unique_id","=","unique_id",",","example_index","=","example_index",",","doc_span_index","=","doc_span_index",",","tokens","=","tokens",",","token_to_orig_map","=","token_to_orig_map",",","token_is_max_context","=","token_is_max_context",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","example",".","is_impossible",")","# Run callback","output_fn","(","feature",")","unique_id","+=","1"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L309-L473"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"_improve_answer_span","parameters":"(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text)","argument_list":"","return_statement":"return (input_start, input_end)","docstring":"Returns tokenized answer spans that better match the annotated answer.","docstring_summary":"Returns tokenized answer spans that better match the annotated answer.","docstring_tokens":["Returns","tokenized","answer","spans","that","better","match","the","annotated","answer","."],"function":"def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)","function_tokens":["def","_improve_answer_span","(","doc_tokens",",","input_start",",","input_end",",","tokenizer",",","orig_answer_text",")",":","# The SQuAD annotations are character based. We first project them to","# whitespace-tokenized words. But then after WordPiece tokenization, we can","# often find a \"better match\". For example:","#","# Question: What year was John Smith born?","# Context: The leader was John Smith (1895-1943).","# Answer: 1895","#","# The original whitespace-tokenized answer will be \"(1895-1943).\". 
However","# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match","# the exact answer, 1895.","#","# However, this is not always possible. Consider the following:","#","# Question: What country is the top exporter of electornics?","# Context: The Japanese electronics industry is the lagest in the world.","# Answer: Japan","#","# In this case, the annotator chose \"Japan\" as a character sub-span of","# the word \"Japanese\". Since our WordPiece tokenizer does not split","# \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare","# in SQuAD, but does happen.","tok_answer_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_answer_text",")",")","for","new_start","in","range","(","input_start",",","input_end","+","1",")",":","for","new_end","in","range","(","input_end",",","new_start","-","1",",","-","1",")",":","text_span","=","\" \"",".","join","(","doc_tokens","[","new_start",":","(","new_end","+","1",")","]",")","if","text_span","==","tok_answer_text",":","return","(","new_start",",","new_end",")","return","(","input_start",",","input_end",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L476-L510"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. 
We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L513-L547"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return (start_logits, end_logits)","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n 
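The `_check_is_max_context` record above scores each window containing a token by the *minimum* of its left and right context, plus a small length tiebreak. A toy check mirroring the docstring's 'bought' example (span starts/lengths are hypothetical):

```python
import collections

# Toy check of the max-context rule above over a 12-token document.
DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
spans = [DocSpan(0, 5), DocSpan(3, 5), DocSpan(6, 5)]  # spans A, B, C

def score(span, position):
    end = span.start + span.length - 1
    if not (span.start <= position <= end):
        return None  # token not in this window
    left, right = position - span.start, end - position
    return min(left, right) + 0.01 * span.length

# A token at position 7 appears in B (4 left, 0 right) and C (1 left,
# 3 right), so C is its max-context window.
assert score(spans[0], 7) is None
assert score(spans[2], 7) > score(spans[1], 7)
```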
final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls\/squad\/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls\/squad\/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, end_logits)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","final_hidden","=","model",".","get_sequence_output","(",")","final_hidden_shape","=","modeling",".","get_shape_list","(","final_hidden",",","expected_rank","=","3",")","batch_size","=","final_hidden_shape","[","0","]","seq_length","=","final_hidden_shape","[","1","]","hidden_size","=","final_hidden_shape","[","2","]","output_weights","=","tf",".","get_variable","(","\"cls\/squad\/output_weights\"",",","[","2",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"cls\/squad\/output_bias\"",",","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","final_hidden_matrix","=","tf",".","reshape","(","final_hidden",",","[","batch_size","*","seq_length",",","hidden_size","]",")","logits","=","tf",".","matmul","(","final_hidden_matrix",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","logits","=","tf",".","reshape","(","logits",",","[","batch_size",",","seq_length",",","2","]",")","logits","=","tf",".","transpose","(","logits",",","[","2",",","0",",","1","]",")","unstacked_logits","=","tf",".","unstack","(","logits",",","axis","=","0",")","(","start_logits",",","end_logits",")","=","(","unstacked_logits","[","0","]",",","unstacked_logits","[","1","]",")","return","(","start_logits",",","end_logits",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L550-L587"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for 
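The `create_model` record above projects every position of BERT's sequence output through a `[2, hidden_size]` weight matrix, then unstacks the result into start and end logits. A NumPy shape walkthrough of that head under toy sizes (not the original TF code, just the same algebra):

```python
import numpy as np

# Shape walkthrough of the span head above; sizes are toy values.
batch_size, seq_length, hidden_size = 2, 5, 4

final_hidden = np.random.randn(batch_size, seq_length, hidden_size)
output_weights = np.random.randn(2, hidden_size)  # one row each for start/end
output_bias = np.zeros(2)

flat = final_hidden.reshape(batch_size * seq_length, hidden_size)
logits = flat @ output_weights.T + output_bias            # [B*S, 2]
logits = logits.reshape(batch_size, seq_length, 2)
logits = logits.transpose(2, 0, 1)                        # [2, B, S]

start_logits, end_logits = logits[0], logits[1]           # each [B, S]
assert start_logits.shape == (batch_size, seq_length)
```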
TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) \/ 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features 
***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","start_logits",",","end_logits",")","=","create_model","(","bert_config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","seq_length","=","modeling",".","get_shape_list","(","input_ids",")","[","1","]","def","compute_loss","(","logits",",","positions",")",":","one_hot_positions","=","tf",".","one_hot","(","positions",",","depth","=","seq_length",",","dtype","=","tf",".","float32",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","loss","=","-","tf",".","reduce_mean","(","tf",".","reduce_sum","(","one_hot_positions","*","log_probs",",","axis","=","-","1",")",")","return","loss","start_positions","=","features","[","\"start_positions\"","]","end_positions","=","features","[","\"end_positions\"","]","start_loss","=","compute_loss","(","start_logits",",","start_positions",")","end_loss","=","compute_loss","(","end_logits",",","end_positions",")","total_loss","=","(","start_loss","+","end_loss",")","\/","2.0","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","predictions","=","{","\"unique_ids\"",":","unique_ids",",","\"start_logits\"",":","start_logits",",","\"end_logits\"",":","end_logits",",","}","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and PREDICT modes are supported: 
%s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L590-L684"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"input_fn_builder","parameters":"(input_file, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"unique_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","}","if","is_training",":","name_to_features","[","\"start_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","name_to_features","[","\"end_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to 
int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L687-L734"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"write_predictions","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = 
_get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")","function_tokens":["def","write_predictions","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",",","output_null_log_odds_file",")",":","tf",".","logging",".","info","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","tf",".","logging",".","info","(","\"Writing nbest to: %s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature",".","example_index","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","all_examples",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","score_null","=","1000000","# large and positive","min_null_feature_index","=","0","# the paragraph slice with min mull score","null_start_logit","=","0","# the start logit at the slice with min null score","null_end_logit","=","0","# the end logit at the slice with min null 
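For SQuAD-2.0-style data, `write_predictions` above scores "no answer" for each window as `start_logits[0] + end_logits[0]` (the `[CLS]` position) and keeps the minimum across windows of an example. A toy illustration of that bookkeeping (the logit values are made up):

```python
# Toy version of the null-score tracking above.
results = [
    {"start": [1.0, 3.0], "end": [0.5, 2.0]},   # window 0: null score 1.5
    {"start": [0.2, 4.0], "end": [0.1, 1.0]},   # window 1: null score 0.3
]

score_null = 1000000  # "large and positive", as in the original
min_null_feature_index = 0
for i, r in enumerate(results):
    feature_null_score = r["start"][0] + r["end"][0]
    if feature_null_score < score_null:
        score_null = feature_null_score
        min_null_feature_index = i

assert min_null_feature_index == 1 and round(score_null, 6) == 0.3
```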
score","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature",".","unique_id","]","start_indexes","=","_get_best_indexes","(","result",".","start_logits",",","n_best_size",")","end_indexes","=","_get_best_indexes","(","result",".","end_logits",",","n_best_size",")","# if we could have irrelevant answers, get the min score of irrelevant","if","FLAGS",".","version_2_with_negative",":","feature_null_score","=","result",".","start_logits","[","0","]","+","result",".","end_logits","[","0","]","if","feature_null_score","<","score_null",":","score_null","=","feature_null_score","min_null_feature_index","=","feature_index","null_start_logit","=","result",".","start_logits","[","0","]","null_end_logit","=","result",".","end_logits","[","0","]","for","start_index","in","start_indexes",":","for","end_index","in","end_indexes",":","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature",".","tokens",")",":","continue","if","end_index",">=","len","(","feature",".","tokens",")",":","continue","if","start_index","not","in","feature",".","token_to_orig_map",":","continue","if","end_index","not","in","feature",".","token_to_orig_map",":","continue","if","not","feature",".","token_is_max_context",".","get","(","start_index",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","result",".","start_logits","[","start_index","]",",","end_logit","=","result",".","end_logits","[","end_index","]",")",")","if","FLAGS",".","version_2_with_negative",":","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","min_null_feature_index",",","start_index","=","0",",","end_index","=","0",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"","]",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature",".","tokens","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature",".","token_to_orig_map","[","pred",".","start_index","]","orig_doc_end","=","feature",".","token_to_orig_map","[","pred",".","end_index","]","orig_tokens","=","example",".","doc_tokens","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\" \"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean 
whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\" \"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",")",")","# if we didn't inlude the empty option in the n-best, inlcude it","if","FLAGS",".","version_2_with_negative",":","if","\"\"","not","in","seen_predictions",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","# In very rare edge cases we could have no valid predictions. So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","assert","len","(","nbest",")",">=","1","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","if","not","best_non_null_entry",":","if","entry",".","text",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","probs","[","i","]","output","[","\"start_logit\"","]","=","entry",".","start_logit","output","[","\"end_logit\"","]","=","entry",".","end_logit","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","if","not","FLAGS",".","version_2_with_negative",":","all_predictions","[","example",".","qas_id","]","=","nbest_json","[","0","]","[","\"text\"","]","else",":","# predict \"\" iff the null score - the score of best non-null > 
threshold","score_diff","=","score_null","-","best_non_null_entry",".","start_logit","-","(","best_non_null_entry",".","end_logit",")","scores_diff_json","[","example",".","qas_id","]","=","score_diff","if","score_diff",">","FLAGS",".","null_score_diff_threshold",":","all_predictions","[","example",".","qas_id","]","=","\"\"","else",":","all_predictions","[","example",".","qas_id","]","=","best_non_null_entry",".","text","all_nbest_json","[","example",".","qas_id","]","=","nbest_json","with","tf",".","gfile",".","GFile","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",")","+","\"\\n\"",")","with","tf",".","gfile",".","GFile","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",")","+","\"\\n\"",")","if","FLAGS",".","version_2_with_negative",":","with","tf",".","gfile",".","GFile","(","output_null_log_odds_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","scores_diff_json",",","indent","=","4",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L741-L924"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"get_final_text","parameters":"(pred_text, orig_text, do_lower_case)","argument_list":"","return_statement":"return output_text","docstring":"Project the tokenized prediction back to the original text.","docstring_summary":"Project the tokenized prediction back to the original text.","docstring_tokens":["Project","the","tokenized","prediction","back","to","the","original","text","."],"function":"def get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping\/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. 
If they are\n # NOT the same length, the heuristic has failed. If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text","function_tokens":["def","get_final_text","(","pred_text",",","orig_text",",","do_lower_case",")",":","# When we created the data, we kept track of the alignment between original","# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So","# now `orig_text` contains the span of our original text corresponding to the","# span that we predicted.","#","# However, `orig_text` may contain extra characters that we don't want in","# our prediction.","#","# For example, let's say:","# pred_text = steve smith","# orig_text = Steve Smith's","#","# We don't want to return `orig_text` because it contains the extra \"'s\".","#","# We don't want to return `pred_text` because it's already been normalized","# (the SQuAD eval script also does punctuation stripping\/lower casing but","# our tokenizer does additional normalization like stripping accent","# characters).","#","# What we really want to return is \"Steve Smith\".","#","# Therefore, we have to apply a semi-complicated alignment heruistic between","# `pred_text` and `orig_text` to get a character-to-charcter alignment. 
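The `_strip_spaces` helper inside `get_final_text` above builds the character-level alignment the heuristic depends on: a space-free string plus a map from stripped indices back to original indices. A stand-alone copy with a toy input:

```python
import collections

# Stand-alone version of `_strip_spaces` above: map indices in a
# space-stripped string back to indices in the original string.
def strip_spaces(text):
    ns_chars, ns_to_s_map = [], collections.OrderedDict()
    for i, c in enumerate(text):
        if c == " ":
            continue
        ns_to_s_map[len(ns_chars)] = i
        ns_chars.append(c)
    return "".join(ns_chars), ns_to_s_map

ns_text, ns_map = strip_spaces("Steve Smith")
assert ns_text == "SteveSmith"
assert ns_map[5] == 6  # the 'S' of "Smith": stripped index 5, original 6
```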
This","# can fail in certain cases in which case we just return `orig_text`.","def","_strip_spaces","(","text",")",":","ns_chars","=","[","]","ns_to_s_map","=","collections",".","OrderedDict","(",")","for","(","i",",","c",")","in","enumerate","(","text",")",":","if","c","==","\" \"",":","continue","ns_to_s_map","[","len","(","ns_chars",")","]","=","i","ns_chars",".","append","(","c",")","ns_text","=","\"\"",".","join","(","ns_chars",")","return","(","ns_text",",","ns_to_s_map",")","# We first tokenize `orig_text`, strip whitespace from the result","# and `pred_text`, and check if they are the same length. If they are","# NOT the same length, the heuristic has failed. If they are the same","# length, we assume the characters are one-to-one aligned.","tokenizer","=","tokenization",".","BasicTokenizer","(","do_lower_case","=","do_lower_case",")","tok_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_text",")",")","start_position","=","tok_text",".","find","(","pred_text",")","if","start_position","==","-","1",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Unable to find text: '%s' in '%s'\"","%","(","pred_text",",","orig_text",")",")","return","orig_text","end_position","=","start_position","+","len","(","pred_text",")","-","1","(","orig_ns_text",",","orig_ns_to_s_map",")","=","_strip_spaces","(","orig_text",")","(","tok_ns_text",",","tok_ns_to_s_map",")","=","_strip_spaces","(","tok_text",")","if","len","(","orig_ns_text",")","!=","len","(","tok_ns_text",")",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Length not equal after stripping spaces: '%s' vs '%s'\"",",","orig_ns_text",",","tok_ns_text",")","return","orig_text","# We then project the characters in `pred_text` back to `orig_text` using","# the character-to-character alignment.","tok_s_to_ns_map","=","{","}","for","(","i",",","tok_index",")","in","six",".","iteritems","(","tok_ns_to_s_map",")",":","tok_s_to_ns_map","[","tok_index","]","=","i","orig_start_position","=","None","if","start_position","in","tok_s_to_ns_map",":","ns_start_position","=","tok_s_to_ns_map","[","start_position","]","if","ns_start_position","in","orig_ns_to_s_map",":","orig_start_position","=","orig_ns_to_s_map","[","ns_start_position","]","if","orig_start_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map start position\"",")","return","orig_text","orig_end_position","=","None","if","end_position","in","tok_s_to_ns_map",":","ns_end_position","=","tok_s_to_ns_map","[","end_position","]","if","ns_end_position","in","orig_ns_to_s_map",":","orig_end_position","=","orig_ns_to_s_map","[","ns_end_position","]","if","orig_end_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map end position\"",")","return","orig_text","output_text","=","orig_text","[","orig_start_position",":","(","orig_end_position","+","1",")","]","return","output_text"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L927-L1020"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a 
list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L1023-L1032"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L1035-L1055"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"validate_flags_or_throw","parameters":"(bert_config)","argument_list":"","return_statement":"","docstring":"Validate the input FLAGS or throw an exception.","docstring_summary":"Validate the input FLAGS or throw an exception.","docstring_tokens":["Validate","the","input","FLAGS","or","throw","an","exception","."],"function":"def validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n 
raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))","function_tokens":["def","validate_flags_or_throw","(","bert_config",")",":","tokenization",".","validate_case_matches_checkpoint","(","FLAGS",".","do_lower_case",",","FLAGS",".","init_checkpoint",")","if","not","FLAGS",".","do_train","and","not","FLAGS",".","do_predict",":","raise","ValueError","(","\"At least one of `do_train` or `do_predict` must be True.\"",")","if","FLAGS",".","do_train",":","if","not","FLAGS",".","train_file",":","raise","ValueError","(","\"If `do_train` is True, then `train_file` must be specified.\"",")","if","FLAGS",".","do_predict",":","if","not","FLAGS",".","predict_file",":","raise","ValueError","(","\"If `do_predict` is True, then `predict_file` must be specified.\"",")","if","FLAGS",".","max_seq_length",">","bert_config",".","max_position_embeddings",":","raise","ValueError","(","\"Cannot use sequence length %d because the BERT model \"","\"was only trained up to sequence length %d\"","%","(","FLAGS",".","max_seq_length",",","bert_config",".","max_position_embeddings",")",")","if","FLAGS",".","max_seq_length","<=","FLAGS",".","max_query_length","+","3",":","raise","ValueError","(","\"The max_seq_length (%d) must be greater than max_query_length \"","\"(%d) + 3\"","%","(","FLAGS",".","max_seq_length",",","FLAGS",".","max_query_length",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L1097-L1123"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_squad.py","language":"python","identifier":"FeatureWriter.process_feature","parameters":"(self, feature)","argument_list":"","return_statement":"","docstring":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_summary":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_tokens":["Write","a","InputFeature","to","the","TFRecordWriter","as","a","tf",".","train",".","Example","."],"function":"def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = 
create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())","function_tokens":["def","process_feature","(","self",",","feature",")",":","self",".","num_features","+=","1","def","create_int_feature","(","values",")",":","feature","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","feature","features","=","collections",".","OrderedDict","(",")","features","[","\"unique_ids\"","]","=","create_int_feature","(","[","feature",".","unique_id","]",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","if","self",".","is_training",":","features","[","\"start_positions\"","]","=","create_int_feature","(","[","feature",".","start_position","]",")","features","[","\"end_positions\"","]","=","create_int_feature","(","[","feature",".","end_position","]",")","impossible","=","0","if","feature",".","is_impossible",":","impossible","=","1","features","[","\"is_impossible\"","]","=","create_int_feature","(","[","impossible","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","self",".","_writer",".","write","(","tf_example",".","SerializeToString","(",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_squad.py#L1067-L1091"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"validate_case_matches_checkpoint","parameters":"(do_lower_case, init_checkpoint)","argument_list":"","return_statement":"","docstring":"Checks whether the casing config is consistent with the checkpoint name.","docstring_summary":"Checks whether the casing config is consistent with the checkpoint name.","docstring_tokens":["Checks","whether","the","casing","config","is","consistent","with","the","checkpoint","name","."],"function":"def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. 
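`process_feature` packs every field as an int64 list (even scalars like `unique_id`, and booleans like `is_impossible` become 0/1) and serializes one `tf.train.Example` per feature. A self-contained sketch with toy values; `tf.io.TFRecordWriter` is the TF2 spelling of the writer the class wraps, which is an assumption about your TensorFlow version:

```python
import collections
import tensorflow as tf

def create_int_feature(values):
    # Every field, even a scalar, is stored as an int64 list.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([1000000001])       # toy values
features["input_ids"] = create_int_feature([101, 2023, 102])
features["input_mask"] = create_int_feature([1, 1, 1])
features["segment_ids"] = create_int_feature([0, 0, 0])

example = tf.train.Example(features=tf.train.Features(feature=features))
with tf.io.TFRecordWriter("/tmp/features.tfrecord") as writer:
    writer.write(example.SerializeToString())
```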
The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-training. If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))","function_tokens":["def","validate_case_matches_checkpoint","(","do_lower_case",",","init_checkpoint",")",":","# The casing has to be passed in by the user and there is no explicit check","# as to whether it matches the checkpoint. The casing information probably","# should have been stored in the bert_config.json file, but it's not, so","# we have to heuristically detect it to validate.","if","not","init_checkpoint",":","return","m","=","re",".","match","(","\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\"",",","init_checkpoint",")","if","m","is","None",":","return","model_name","=","m",".","group","(","1",")","lower_models","=","[","\"uncased_L-24_H-1024_A-16\"",",","\"uncased_L-12_H-768_A-12\"",",","\"multilingual_L-12_H-768_A-12\"",",","\"chinese_L-12_H-768_A-12\"","]","cased_models","=","[","\"cased_L-12_H-768_A-12\"",",","\"cased_L-24_H-1024_A-16\"",",","\"multi_cased_L-12_H-768_A-12\"","]","is_bad_config","=","False","if","model_name","in","lower_models","and","not","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"False\"","case_name","=","\"lowercased\"","opposite_flag","=","\"True\"","if","model_name","in","cased_models","and","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"True\"","case_name","=","\"cased\"","opposite_flag","=","\"False\"","if","is_bad_config",":","raise","ValueError","(","\"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"","\"However, `%s` seems to be a %s model, so you \"","\"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"","\"how the model was pre-training. 
If this error is wrong, please \"","\"just comment out this check.\"","%","(","actual_flag",",","init_checkpoint",",","model_name",",","case_name",",","opposite_flag",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L28-L75"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L78-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif 
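The casing check above is purely heuristic: it regex-matches the checkpoint path to recover the released model's directory name and compares it against hard-coded lowercased/cased lists. The extraction step in isolation:

```python
import re

def model_name_from_checkpoint(init_checkpoint):
    # Only the directory that contains bert_model.ckpt is inspected.
    m = re.match(r"^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
    return m.group(1) if m else None

name = model_name_from_checkpoint(
    "models/uncased_L-12_H-768_A-12/bert_model.ckpt")
print(name)  # "uncased_L-12_H-768_A-12": in the lowercased list, so
             # passing --do_lower_case=False would raise ValueError
```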
isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L98-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","index","=","0","with","tf",".","gfile",".","GFile","(","vocab_file",",","\"r\"",")","as","reader",":","while","True",":","token","=","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","token",":","break","token","=","token",".","strip","(",")","vocab","[","token","]","=","index","index","+=","1","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L121-L133"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"convert_by_vocab","parameters":"(vocab, items)","argument_list":"","return_statement":"return output","docstring":"Converts a sequence of [tokens|ids] using the vocab.","docstring_summary":"Converts a sequence of [tokens|ids] using the vocab.","docstring_tokens":["Converts","a","sequence","of","[","tokens|ids","]","using","the","vocab","."],"function":"def convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n output.append(vocab[item])\n return 
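`load_vocab` assigns token ids by file position, and `convert_by_vocab` is a straight lookup used for both the token-to-id and id-to-token directions. A sketch with plain `open()` standing in for `tf.gfile.GFile` (the original loops on `readline()` until it returns an empty string, which is equivalent for a well-formed vocab file with no trailing blank lines):

```python
import collections

def load_vocab(vocab_file):
    # Ids are assigned in line order, matching the record above.
    vocab = collections.OrderedDict()
    with open(vocab_file, encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            vocab[line.strip()] = index
    return vocab

def convert_by_vocab(vocab, items):
    # Pass an inverted {id: token} dict to map ids back to tokens.
    return [vocab[item] for item in items]

vocab = {"[UNK]": 0, "hello": 1, "world": 2}         # toy vocabulary
print(convert_by_vocab(vocab, ["hello", "world"]))   # [1, 2]
```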
output","function_tokens":["def","convert_by_vocab","(","vocab",",","items",")",":","output","=","[","]","for","item","in","items",":","output",".","append","(","vocab","[","item","]",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L136-L141"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","piece","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L152-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically contorl characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L362-L371"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n 
\"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat","in","(","\"Cc\"",",","\"Cf\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L374-L383"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L386-L399"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = 
do_lower_case","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",")",":","self",".","do_lower_case","=","do_lower_case"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L188-L194"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text.","docstring_summary":"Tokenizes a piece of text.","docstring_tokens":["Tokenizes","a","piece","of","text","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. 
This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L196-L218"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L220-L229"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text)","argument_list":"","return_statement":"return [\"\".join(x) for x in output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in 
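`_run_strip_accents` relies on Unicode NFD normalization: decomposition separates base letters from their combining marks, and dropping category "Mn" (nonspacing marks) removes the accents. For example:

```python
import unicodedata

def run_strip_accents(text):
    # NFD decomposes "é" into "e" + U+0301; skipping "Mn" characters
    # drops the combining accent and keeps the base letter.
    text = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in text if unicodedata.category(ch) != "Mn")

print(run_strip_accents("Café Müller"))  # "Cafe Muller"
```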
output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",")",":","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L231-L249"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L251-L262"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like the all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L264-L284"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L286-L297"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tokenization.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, 
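`_is_chinese_char` tests raw codepoints against the CJK Unified Ideographs block and its extensions, and `_tokenize_chinese_chars` uses it to space-pad each ideograph so the later whitespace split yields one token per character. A compact sketch of the pair:

```python
def is_chinese_char(cp):
    # CJK Unified Ideographs plus extensions and compatibility blocks;
    # Hangul, Hiragana and Katakana are deliberately excluded because
    # those scripts write space-separated words.
    return ((0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF) or
            (0x20000 <= cp <= 0x2A6DF) or (0x2A700 <= cp <= 0x2B73F) or
            (0x2B740 <= cp <= 0x2B81F) or (0x2B820 <= cp <= 0x2CEAF) or
            (0xF900 <= cp <= 0xFAFF) or (0x2F800 <= cp <= 0x2FA1F))

def tokenize_chinese_chars(text):
    # Pad every CJK ideograph with spaces so each becomes its own token.
    return "".join(" %s " % ch if is_chinese_char(ord(ch)) else ch
                   for ch in text)

print(tokenize_chinese_chars("ab你好cd"))  # "ab 你  好 cd"
```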
text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tokenization.py#L308-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py","language":"python","identifier":"precision","parameters":"(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro')","argument_list":"","return_statement":"return (pr, op)","docstring":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of 
tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","precision","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def precision(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro'):\n \"\"\"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n 
num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n pr, _, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n op, _, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (pr, op)","function_tokens":["def","precision","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","pr",",","_",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","op",",","_",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","pr",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py#L15-L50"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py","language":"python","identifier":"recall","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro')","argument_list":"","return_statement":"return (re, op)","docstring":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
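The `(pr, op)` pair returned above follows the TF1 streaming-metric convention: the first element reads the metric from the accumulated confusion matrix, the second recomputes it from the matrix's update op. The averaging modes themselves are easiest to see offline; a NumPy sketch of 'micro' versus 'macro' precision on an invented 3-class confusion matrix:

```python
import numpy as np

cm = np.array([[5, 1, 0],     # rows = gold label, cols = predicted label
               [2, 4, 2],
               [0, 1, 6]])
pos = [0, 1, 2]               # treat every class as positive

# 'micro': pool true positives and predictions across the positive
# classes, then take one ratio (equals accuracy when pos is all classes).
tp = cm[pos, pos].sum()
micro_precision = tp / cm[:, pos].sum()

# 'macro': one precision per class, then an unweighted mean, so rare
# classes count as much as frequent ones.
per_class = [cm[i, i] / cm[:, i].sum() for i in pos]
macro_precision = float(np.mean(per_class))

print(round(float(micro_precision), 3), round(macro_precision, 3))
# 0.714 0.71
```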
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","recall","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def recall(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro'):\n \"\"\"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, re, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n _, op, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (re, op)","function_tokens":["def","recall","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","re",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","_",",","op",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","re",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py#L53-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py","language":"python","identifier":"fbeta","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1)","argument_list":"","return_statement":"return (fbeta, op)","docstring":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
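`recall` mirrors `precision` with the row and column roles of the confusion matrix swapped; the remaining 'weighted' mode averages per-class scores using gold counts as weights. A small NumPy illustration on the same invented matrix as above (note that with every class positive, weighted recall reduces to sum of the diagonal over the total, i.e. plain accuracy):

```python
import numpy as np

cm = np.array([[5, 1, 0],
               [2, 4, 2],
               [0, 1, 6]])

# 'weighted': per-class recall weighted by gold counts (row sums), so
# frequent classes dominate, unlike 'macro'.
gold = cm.sum(axis=1)                    # [6, 8, 7]
per_class_recall = np.diag(cm) / gold    # [0.833, 0.5, 0.857]
weighted_recall = (per_class_recall * gold).sum() / gold.sum()
print(round(float(weighted_recall), 3))  # 0.714
```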
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","fbeta","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1):\n \"\"\"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, _, fbeta = metrics_from_confusion_matrix(\n cm, pos_indices, average=average, beta=beta)\n _, _, op = metrics_from_confusion_matrix(\n op, pos_indices, average=average, beta=beta)\n return (fbeta, op)","function_tokens":["def","fbeta","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",",","beta","=","1",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","_",",","fbeta","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",",","beta","=","beta",")","_",",","_",",","op","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",",","beta","=","beta",")","return","(","fbeta",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py#L97-L134"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py","language":"python","identifier":"safe_div","parameters":"(numerator, denominator)","argument_list":"","return_statement":"return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","docstring":"Safe division, return 0 if denominator is 0","docstring_summary":"Safe division, return 0 if denominator is 0","docstring_tokens":["Safe","division","return","0","if","denominator","is","0"],"function":"def safe_div(numerator, denominator):\n \"\"\"Safe division, return 0 if denominator is 0\"\"\"\n numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)\n zeros = tf.zeros_like(numerator, dtype=numerator.dtype)\n denominator_is_zero = tf.equal(denominator, zeros)\n return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","function_tokens":["def","safe_div","(","numerator",",","denominator",")",":","numerator",",","denominator","=","tf",".","to_float","(","numerator",")",",","tf",".","to_float","(","denominator",")","zeros","=","tf",".","zeros_like","(","numerator",",","dtype","=","numerator",".","dtype",")","denominator_is_zero","=","tf",".","equal","(","denominator",",","zeros",")","return","tf",".","where","(","denominator_is_zero",",","zeros",",","numerator","\/","denominator",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py#L137-L142"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py","language":"python","identifier":"pr_re_fbeta","parameters":"(cm, pos_indices, beta=1)","argument_list":"","return_statement":"return pr, re, fbeta","docstring":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_summary":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_tokens":["Uses","a","confusion","matrix","to","compute","precision","recall","and","fbeta"],"function":"def 
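`safe_div` and `pr_re_fbeta` above do the arithmetic with TF ops and mask matrices; the same computation is easier to verify in NumPy. A hedged re-derivation (`pr_re_fbeta_np` is my name, not part of the module):

```python
import numpy as np

def pr_re_fbeta_np(cm, pos_indices, beta=1.0):
    # True positives are the diagonal entries of the positive classes;
    # predicted and gold totals are the column and row sums restricted to
    # those classes, matching the three cm_mask passes above.
    cm = np.asarray(cm, dtype=float)
    pos = list(pos_indices)
    tp = cm[pos, pos].sum()
    tot_pred = cm[:, pos].sum()
    tot_gold = cm[pos, :].sum()
    pr = tp / tot_pred if tot_pred else 0.0     # safe_div behaviour
    re = tp / tot_gold if tot_gold else 0.0
    denom = beta ** 2 * pr + re
    fb = (1.0 + beta ** 2) * pr * re / denom if denom else 0.0
    return pr, re, fb

cm = [[5, 1],        # rows = gold, cols = predicted
      [2, 4]]
print(pr_re_fbeta_np(cm, [1]))  # (0.8, 0.666..., 0.727...)
```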
pr_re_fbeta(cm, pos_indices, beta=1):\n \"\"\"Uses a confusion matrix to compute precision, recall and fbeta\"\"\"\n num_classes = cm.shape[0]\n neg_indices = [i for i in range(num_classes) if i not in pos_indices]\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, neg_indices] = 0\n diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask))\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[:, neg_indices] = 0\n tot_pred = tf.reduce_sum(cm * cm_mask)\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, :] = 0\n tot_gold = tf.reduce_sum(cm * cm_mask)\n\n pr = safe_div(diag_sum, tot_pred)\n re = safe_div(diag_sum, tot_gold)\n fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re)\n\n return pr, re, fbeta","function_tokens":["def","pr_re_fbeta","(","cm",",","pos_indices",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","neg_indices","=","[","i","for","i","in","range","(","num_classes",")","if","i","not","in","pos_indices","]","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",","neg_indices","]","=","0","diag_sum","=","tf",".","reduce_sum","(","tf",".","diag_part","(","cm","*","cm_mask",")",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[",":",",","neg_indices","]","=","0","tot_pred","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",",":","]","=","0","tot_gold","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","pr","=","safe_div","(","diag_sum",",","tot_pred",")","re","=","safe_div","(","diag_sum",",","tot_gold",")","fbeta","=","safe_div","(","(","1.","+","beta","**","2",")","*","pr","*","re",",","beta","**","2","*","pr","+","re",")","return","pr",",","re",",","fbeta"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py#L145-L165"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py","language":"python","identifier":"metrics_from_confusion_matrix","parameters":"(cm, pos_indices=None, average='micro',\n beta=1)","argument_list":"","return_statement":"","docstring":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'","docstring_summary":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 
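`pr_re_fbeta` derives precision and recall from masked sums of the confusion matrix: the diagonal entries of the positive classes are the true positives, the positive columns sum to everything predicted positive, and the positive rows sum to everything actually positive. A NumPy sketch of the binary case with class 1 positive; the counts are illustrative, not from the source:

import numpy as np

cm = np.array([[50., 10.],      # rows = gold labels, cols = predictions,
               [ 5., 35.]])     # matching tf.confusion_matrix's layout
tp = cm[1, 1]                   # diagonal entry of the positive class
tot_pred = cm[:, 1].sum()       # column sum: all predicted positive (45)
tot_gold = cm[1, :].sum()       # row sum: all actually positive (40)
precision, recall = tp / tot_pred, tp / tot_gold
f1 = 2 * precision * recall / (precision + recall)   # the beta = 1 case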
'weighted'","docstring_tokens":["Precision","Recall","and","F1","from","the","confusion","matrix","Parameters","----------","cm",":","tf",".","Tensor","of","type","tf",".","int32","of","shape","(","num_classes","num_classes",")","The","streaming","confusion","matrix",".","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","average",":","str","optional","micro","macro","or","weighted"],"function":"def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro',\n beta=1):\n \"\"\"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'\n \"\"\"\n num_classes = cm.shape[0]\n if pos_indices is None:\n pos_indices = [i for i in range(num_classes)]\n\n if average == 'micro':\n return pr_re_fbeta(cm, pos_indices, beta)\n elif average in {'macro', 'weighted'}:\n precisions, recalls, fbetas, n_golds = [], [], [], []\n for idx in pos_indices:\n pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)\n precisions.append(pr)\n recalls.append(re)\n fbetas.append(fbeta)\n cm_mask = np.zeros([num_classes, num_classes])\n cm_mask[idx, :] = 1\n n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))\n\n if average == 'macro':\n pr = tf.reduce_mean(precisions)\n re = tf.reduce_mean(recalls)\n fbeta = tf.reduce_mean(fbetas)\n return pr, re, fbeta\n if average == 'weighted':\n n_gold = tf.reduce_sum(n_golds)\n pr_sum = sum(p * n for p, n in zip(precisions, n_golds))\n pr = safe_div(pr_sum, n_gold)\n re_sum = sum(r * n for r, n in zip(recalls, n_golds))\n re = safe_div(re_sum, n_gold)\n fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))\n fbeta = safe_div(fbeta_sum, n_gold)\n return pr, re, fbeta\n\n else:\n raise 
NotImplementedError()","function_tokens":["def","metrics_from_confusion_matrix","(","cm",",","pos_indices","=","None",",","average","=","'micro'",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","if","pos_indices","is","None",":","pos_indices","=","[","i","for","i","in","range","(","num_classes",")","]","if","average","==","'micro'",":","return","pr_re_fbeta","(","cm",",","pos_indices",",","beta",")","elif","average","in","{","'macro'",",","'weighted'","}",":","precisions",",","recalls",",","fbetas",",","n_golds","=","[","]",",","[","]",",","[","]",",","[","]","for","idx","in","pos_indices",":","pr",",","re",",","fbeta","=","pr_re_fbeta","(","cm",",","[","idx","]",",","beta",")","precisions",".","append","(","pr",")","recalls",".","append","(","re",")","fbetas",".","append","(","fbeta",")","cm_mask","=","np",".","zeros","(","[","num_classes",",","num_classes","]",")","cm_mask","[","idx",",",":","]","=","1","n_golds",".","append","(","tf",".","to_float","(","tf",".","reduce_sum","(","cm","*","cm_mask",")",")",")","if","average","==","'macro'",":","pr","=","tf",".","reduce_mean","(","precisions",")","re","=","tf",".","reduce_mean","(","recalls",")","fbeta","=","tf",".","reduce_mean","(","fbetas",")","return","pr",",","re",",","fbeta","if","average","==","'weighted'",":","n_gold","=","tf",".","reduce_sum","(","n_golds",")","pr_sum","=","sum","(","p","*","n","for","p",",","n","in","zip","(","precisions",",","n_golds",")",")","pr","=","safe_div","(","pr_sum",",","n_gold",")","re_sum","=","sum","(","r","*","n","for","r",",","n","in","zip","(","recalls",",","n_golds",")",")","re","=","safe_div","(","re_sum",",","n_gold",")","fbeta_sum","=","sum","(","f","*","n","for","f",",","n","in","zip","(","fbetas",",","n_golds",")",")","fbeta","=","safe_div","(","fbeta_sum",",","n_gold",")","return","pr",",","re",",","fbeta","else",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/tf_metrics.py#L168-L215"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * cdf","docstring":"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.","docstring_summary":"Gaussian Error Linear Unit.","docstring_tokens":["Gaussian","Error","Linear","Unit","."],"function":"def gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 \/ np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","tf",".","tanh","(","(","np",".","sqrt","(","2","\/","np",".","pi",")","*","(","x","+","0.044715","*","tf",".","pow","(","x",",","3",")",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L264-L277"} 
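The `gelu` above is the tanh approximation from the paper it cites; it tracks the exact erf-based definition x * Phi(x) to within roughly 1e-3. A quick stdlib check, not part of the source:

import math

def gelu_tanh(x):
    # the tanh approximation used in modeling.py above
    return 0.5 * x * (1.0 + math.tanh(
        math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))

def gelu_exact(x):
    # exact GELU: x * Phi(x), with Phi the standard normal CDF
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(gelu_tanh(v) - gelu_exact(v)) < 1e-3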
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"get_activation","parameters":"(activation_string)","argument_list":"","return_statement":"","docstring":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.","docstring_summary":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.","docstring_tokens":["Maps","a","string","to","a","Python","function","e",".","g",".","relu","=",">","tf",".","nn",".","relu","."],"function":"def get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)","function_tokens":["def","get_activation","(","activation_string",")",":","# We assume that anything that\"s not a string is already an activation","# function, so we just return it.","if","not","isinstance","(","activation_string",",","six",".","string_types",")",":","return","activation_string","if","not","activation_string",":","return","None","act","=","activation_string",".","lower","(",")","if","act","==","\"linear\"",":","return","None","elif","act","==","\"relu\"",":","return","tf",".","nn",".","relu","elif","act","==","\"gelu\"",":","return","gelu","elif","act","==","\"tanh\"",":","return","tf",".","tanh","else",":","raise","ValueError","(","\"Unsupported activation: %s\"","%","act",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L280-L314"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"get_assignment_map_from_checkpoint","parameters":"(tvars, init_checkpoint)","argument_list":"","return_statement":"return (assignment_map, initialized_variable_names)","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def 
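Behavior of `get_activation` follows directly from its branches: strings are lowercased and mapped, falsy values yield None, and non-strings pass through untouched. Illustrative calls, assuming the module's own `get_activation` and `gelu` are in scope:

act = get_activation("gelu")             # -> the gelu function above
assert get_activation("linear") is None  # "linear" means no activation
assert get_activation("") is None        # empty string also yields None

custom = lambda x: x                     # any non-string passes through
assert get_activation(custom) is custom

try:
    get_activation("swish")              # unknown names raise ValueError
except ValueError:
    pass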
get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)","function_tokens":["def","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","assignment_map","=","{","}","initialized_variable_names","=","{","}","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","if","name","not","in","name_to_variable",":","continue","assignment_map","[","name","]","=","name","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","return","(","assignment_map",",","initialized_variable_names",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L317-L341"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"dropout","parameters":"(input_tensor, dropout_prob)","argument_list":"","return_statement":"return output","docstring":"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.","docstring_summary":"Perform dropout.","docstring_tokens":["Perform","dropout","."],"function":"def dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
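`get_assignment_map_from_checkpoint` is typically consumed by the warm-start path of the accompanying run scripts; a sketch of that pattern under TF 1.x, with a hypothetical checkpoint path:

import tensorflow as tf  # TF 1.x assumed

init_checkpoint = "/path/to/bert_model.ckpt"   # placeholder path
tvars = tf.trainable_variables()
assignment_map, initialized_variable_names = (
    get_assignment_map_from_checkpoint(tvars, init_checkpoint))
# Map checkpoint variables onto the freshly built graph before training.
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)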
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\n return output","function_tokens":["def","dropout","(","input_tensor",",","dropout_prob",")",":","if","dropout_prob","is","None","or","dropout_prob","==","0.0",":","return","input_tensor","output","=","tf",".","nn",".","dropout","(","input_tensor",",","1.0","-","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L344-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"layer_norm","parameters":"(input_tensor, name=None)","argument_list":"","return_statement":"return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","docstring":"Run layer normalization on the last dimension of the tensor.","docstring_summary":"Run layer normalization on the last dimension of the tensor.","docstring_tokens":["Run","layer","normalization","on","the","last","dimension","of","the","tensor","."],"function":"def layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","function_tokens":["def","layer_norm","(","input_tensor",",","name","=","None",")",":","return","tf",".","contrib",".","layers",".","layer_norm","(","inputs","=","input_tensor",",","begin_norm_axis","=","-","1",",","begin_params_axis","=","-","1",",","scope","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L362-L365"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"layer_norm_and_dropout","parameters":"(input_tensor, dropout_prob, name=None)","argument_list":"","return_statement":"return output_tensor","docstring":"Runs layer normalization followed by dropout.","docstring_summary":"Runs layer normalization followed by dropout.","docstring_tokens":["Runs","layer","normalization","followed","by","dropout","."],"function":"def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor","function_tokens":["def","layer_norm_and_dropout","(","input_tensor",",","dropout_prob",",","name","=","None",")",":","output_tensor","=","layer_norm","(","input_tensor",",","name",")","output_tensor","=","dropout","(","output_tensor",",","dropout_prob",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L368-L372"} 
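Note the probability convention in `dropout`: `dropout_prob` is the chance of dropping a unit, while TF 1.x's `tf.nn.dropout` takes the keep probability, hence the `1.0 - dropout_prob` conversion; passing `None` or `0.0` short-circuits to the identity, which is how evaluation mode disables dropout. Illustrative values only:

hidden_dropout_prob = 0.1              # drop 10% of activations (training)
keep_prob = 1.0 - hidden_dropout_prob  # 0.9, the tf.nn.dropout argument
eval_dropout_prob = 0.0                # at eval time dropout(x, 0.0) returns x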
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"create_initializer","parameters":"(initializer_range=0.02)","argument_list":"","return_statement":"return tf.truncated_normal_initializer(stddev=initializer_range)","docstring":"Creates a `truncated_normal_initializer` with the given range.","docstring_summary":"Creates a `truncated_normal_initializer` with the given range.","docstring_tokens":["Creates","a","truncated_normal_initializer","with","the","given","range","."],"function":"def create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)","function_tokens":["def","create_initializer","(","initializer_range","=","0.02",")",":","return","tf",".","truncated_normal_initializer","(","stddev","=","initializer_range",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L375-L377"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"embedding_lookup","parameters":"(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False)","argument_list":"","return_statement":"return (output, embedding_table)","docstring":"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].","docstring_summary":"Looks up words embeddings for id tensor.","docstring_tokens":["Looks","up","words","embeddings","for","id","tensor","."],"function":"def embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. 
If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.get_variable(\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n output = tf.matmul(one_hot_input_ids, embedding_table)\n else:\n output = tf.gather(embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * embedding_size])\n return (output, embedding_table)","function_tokens":["def","embedding_lookup","(","input_ids",",","vocab_size",",","embedding_size","=","128",",","initializer_range","=","0.02",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","False",")",":","# This function assumes that the input is of shape [batch_size, seq_length,","# num_inputs].","#","# If the input is a 2D tensor of shape [batch_size, seq_length], we","# reshape to [batch_size, seq_length, 1].","if","input_ids",".","shape",".","ndims","==","2",":","input_ids","=","tf",".","expand_dims","(","input_ids",",","axis","=","[","-","1","]",")","embedding_table","=","tf",".","get_variable","(","name","=","word_embedding_name",",","shape","=","[","vocab_size",",","embedding_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","flat_input_ids","=","tf",".","reshape","(","input_ids",",","[","-","1","]",")","if","use_one_hot_embeddings",":","one_hot_input_ids","=","tf",".","one_hot","(","flat_input_ids",",","depth","=","vocab_size",")","output","=","tf",".","matmul","(","one_hot_input_ids",",","embedding_table",")","else",":","output","=","tf",".","gather","(","embedding_table",",","flat_input_ids",")","input_shape","=","get_shape_list","(","input_ids",")","output","=","tf",".","reshape","(","output",",","input_shape","[","0",":","-","1","]","+","[","input_shape","[","-","1","]","*","embedding_size","]",")","return","(","output",",","embedding_table",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L380-L425"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"embedding_postprocessor","parameters":"(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1)","argument_list":"","return_statement":"return output","docstring":"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. 
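The two lookup strategies in `embedding_lookup` are numerically identical; the one-hot matmul variant exists because it tends to be faster on TPUs, while `tf.gather` is preferred elsewhere. A NumPy demonstration of the equivalence, with toy sizes:

import numpy as np

vocab_size, embedding_size = 6, 4
table = np.arange(vocab_size * embedding_size,
                  dtype=np.float32).reshape(vocab_size, embedding_size)
ids = np.array([1, 3, 3])

gathered = table[ids]                                  # tf.gather path
one_hot = np.eye(vocab_size, dtype=np.float32)[ids]    # tf.one_hot path
assert np.allclose(one_hot @ table, gathered)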
Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.","docstring_summary":"Performs various post-processing on a word embedding tensor.","docstring_tokens":["Performs","various","post","-","processing","on","a","word","embedding","tensor","."],"function":"def embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n\n output = layer_norm_and_dropout(output, dropout_prob)\n return output","function_tokens":["def","embedding_postprocessor","(","input_tensor",",","use_token_type","=","False",",","token_type_ids","=","None",",","token_type_vocab_size","=","16",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","0.02",",","max_position_embeddings","=","512",",","dropout_prob","=","0.1",")",":","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","width","=","input_shape","[","2","]","output","=","input_tensor","if","use_token_type",":","if","token_type_ids","is","None",":","raise","ValueError","(","\"`token_type_ids` must be specified if\"","\"`use_token_type` is True.\"",")","token_type_table","=","tf",".","get_variable","(","name","=","token_type_embedding_name",",","shape","=","[","token_type_vocab_size",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# This vocab will be small so we always do one-hot here, since it is always","# faster for a small vocabulary.","flat_token_type_ids","=","tf",".","reshape","(","token_type_ids",",","[","-","1","]",")","one_hot_ids","=","tf",".","one_hot","(","flat_token_type_ids",",","depth","=","token_type_vocab_size",")","token_type_embeddings","=","tf",".","matmul","(","one_hot_ids",",","token_type_table",")","token_type_embeddings","=","tf",".","reshape","(","token_type_embeddings",",","[","batch_size",",","seq_length",",","width","]",")","output","+=","token_type_embeddings","if","use_position_embeddings",":","assert_op","=","tf",".","assert_less_equal","(","seq_length",",","max_position_embeddings",")","with","tf",".","control_dependencies","(","[","assert_op","]",")",":","full_position_embeddings","=","tf",".","get_variable","(","name","=","position_embedding_name",",","shape","=","[","max_position_embeddings",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# Since the position embedding table is a learned variable, we create it","# using a (long) sequence length `max_position_embeddings`. The actual","# sequence length might be shorter than this, for faster training of","# tasks that do not have long sequences.","#","# So `full_position_embeddings` is effectively an embedding table","# for position [0, 1, 2, ..., max_position_embeddings-1], and the current","# sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just","# perform a slice.","position_embeddings","=","tf",".","slice","(","full_position_embeddings",",","[","0",",","0","]",",","[","seq_length",",","-","1","]",")","num_dims","=","len","(","output",".","shape",".","as_list","(",")",")","# Only the last two dimensions are relevant (`seq_length` and `width`), so","# we broadcast among the first dimensions, which is typically just","# the batch size.","position_broadcast_shape","=","[","]","for","_","in","range","(","num_dims","-","2",")",":","position_broadcast_shape",".","append","(","1",")","position_broadcast_shape",".","extend","(","[","seq_length",",","width","]",")","position_embeddings","=","tf",".","reshape","(","position_embeddings",",","position_broadcast_shape",")","output","+=","position_embeddings","output","=","layer_norm_and_dropout","(","output",",","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L428-L521"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"create_attention_mask_from_input_mask","parameters":"(from_tensor, to_mask)","argument_list":"","return_statement":"return mask","docstring":"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].","docstring_summary":"Create 3D attention mask from a 2D tensor mask.","docstring_tokens":["Create","3D","attention","mask","from","a","2D","tensor","mask","."],"function":"def create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
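The position-embedding arithmetic in `embedding_postprocessor` reduces to a slice of the learned table followed by a broadcast over the batch dimension. A shape-level NumPy sketch with illustrative sizes:

import numpy as np

batch_size, seq_length, width = 8, 128, 768
max_position_embeddings = 512

full_table = np.zeros((max_position_embeddings, width), np.float32)
pos = full_table[:seq_length]              # the tf.slice step
pos = pos.reshape(1, seq_length, width)    # position_broadcast_shape
output = np.zeros((batch_size, seq_length, width), np.float32)
output = output + pos                      # broadcasts across the batch
assert output.shape == (batch_size, seq_length, width)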
We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask","function_tokens":["def","create_attention_mask_from_input_mask","(","from_tensor",",","to_mask",")",":","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_shape","=","get_shape_list","(","to_mask",",","expected_rank","=","2",")","to_seq_length","=","to_shape","[","1","]","to_mask","=","tf",".","cast","(","tf",".","reshape","(","to_mask",",","[","batch_size",",","1",",","to_seq_length","]",")",",","tf",".","float32",")","# We don't assume that `from_tensor` is a mask (although it could be). We","# don't actually care if we attend *from* padding tokens (only *to* padding)","# tokens so we create a tensor of all ones.","#","# `broadcast_ones` = [batch_size, from_seq_length, 1]","broadcast_ones","=","tf",".","ones","(","shape","=","[","batch_size",",","from_seq_length",",","1","]",",","dtype","=","tf",".","float32",")","# Here we broadcast along two dimensions to create the mask.","mask","=","broadcast_ones","*","to_mask","return","mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L524-L555"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"attention_layer","parameters":"(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None)","argument_list":"","return_statement":"return context_layer","docstring":"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
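`create_attention_mask_from_input_mask` expands a per-token padding mask into a [batch, from_seq, to_seq] attention mask by multiplying a column of ones with the reshaped `to_mask`. The same arithmetic in NumPy, for one padded sequence of length 5:

import numpy as np

to_mask = np.array([[1, 1, 1, 0, 0]], np.float32)    # [B=1, T=5]
to_mask = to_mask.reshape(1, 1, 5)                   # [B, 1, T]
broadcast_ones = np.ones((1, 5, 1), np.float32)      # [B, F=5, 1]
mask = broadcast_ones * to_mask                      # [B, F, T]
assert mask.shape == (1, 5, 5)
# every query position may attend to the three real tokens, never the padding
assert mask[0, 0].tolist() == [1.0, 1.0, 1.0, 0.0, 0.0]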
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.","docstring_summary":"Performs multi-headed attention from `from_tensor` to `to_tensor`.","docstring_tokens":["Performs","multi","-","headed","attention","from","from_tensor","to","to_tensor","."],"function":"def attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = 
transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 \/ math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer","function_tokens":["def","attention_layer","(","from_tensor",",","to_tensor",",","attention_mask","=","None",",","num_attention_heads","=","1",",","size_per_head","=","512",",","query_act","=","None",",","key_act","=","None",",","value_act","=","None",",","attention_probs_dropout_prob","=","0.0",",","initializer_range","=","0.02",",","do_return_2d_tensor","=","False",",","batch_size","=","None",",","from_seq_length","=","None",",","to_seq_length","=","None",")",":","def","transpose_for_scores","(","input_tensor",",","batch_size",",","num_attention_heads",",","seq_length",",","width",")",":","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","batch_size",",","seq_length",",","num_attention_heads",",","width","]",")","output_tensor","=","tf",".","transpose","(","output_tensor",",","[","0",",","2",",","1",",","3","]",")","return","output_tensor","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","to_shape","=","get_shape_list","(","to_tensor",",","expected_rank","=","[","2",",","3","]",")","if","len","(","from_shape",")","!=","len","(","to_shape",")",":","raise","ValueError","(","\"The rank of `from_tensor` must match the rank of 
`to_tensor`.\"",")","if","len","(","from_shape",")","==","3",":","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_seq_length","=","to_shape","[","1","]","elif","len","(","from_shape",")","==","2",":","if","(","batch_size","is","None","or","from_seq_length","is","None","or","to_seq_length","is","None",")",":","raise","ValueError","(","\"When passing in rank 2 tensors to attention_layer, the values \"","\"for `batch_size`, `from_seq_length`, and `to_seq_length` \"","\"must all be specified.\"",")","# Scalar dimensions referenced here:","# B = batch size (number of sequences)","# F = `from_tensor` sequence length","# T = `to_tensor` sequence length","# N = `num_attention_heads`","# H = `size_per_head`","from_tensor_2d","=","reshape_to_matrix","(","from_tensor",")","to_tensor_2d","=","reshape_to_matrix","(","to_tensor",")","# `query_layer` = [B*F, N*H]","query_layer","=","tf",".","layers",".","dense","(","from_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","query_act",",","name","=","\"query\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `key_layer` = [B*T, N*H]","key_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","key_act",",","name","=","\"key\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `value_layer` = [B*T, N*H]","value_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","value_act",",","name","=","\"value\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `query_layer` = [B, N, F, H]","query_layer","=","transpose_for_scores","(","query_layer",",","batch_size",",","num_attention_heads",",","from_seq_length",",","size_per_head",")","# `key_layer` = [B, N, T, H]","key_layer","=","transpose_for_scores","(","key_layer",",","batch_size",",","num_attention_heads",",","to_seq_length",",","size_per_head",")","# Take the dot product between \"query\" and \"key\" to get the raw","# attention scores.","# `attention_scores` = [B, N, F, T]","attention_scores","=","tf",".","matmul","(","query_layer",",","key_layer",",","transpose_b","=","True",")","attention_scores","=","tf",".","multiply","(","attention_scores",",","1.0","\/","math",".","sqrt","(","float","(","size_per_head",")",")",")","if","attention_mask","is","not","None",":","# `attention_mask` = [B, 1, F, T]","attention_mask","=","tf",".","expand_dims","(","attention_mask",",","axis","=","[","1","]",")","# Since attention_mask is 1.0 for positions we want to attend and 0.0 for","# masked positions, this operation will create a tensor which is 0.0 for","# positions we want to attend and -10000.0 for masked positions.","adder","=","(","1.0","-","tf",".","cast","(","attention_mask",",","tf",".","float32",")",")","*","-","10000.0","# Since we are adding it to the raw scores before the softmax, this is","# effectively the same as removing these entirely.","attention_scores","+=","adder","# Normalize the attention scores to probabilities.","# `attention_probs` = [B, N, F, T]","attention_probs","=","tf",".","nn",".","softmax","(","attention_scores",")","# This is actually dropping out entire tokens to attend to, which might","# seem a bit unusual, but is taken from the original Transformer paper.","attention_probs","=","dropout","(","attention_probs",",","attention_probs_dropout_prob",")","# `value_layer` = 
[B, T, N, H]","value_layer","=","tf",".","reshape","(","value_layer",",","[","batch_size",",","to_seq_length",",","num_attention_heads",",","size_per_head","]",")","# `value_layer` = [B, N, T, H]","value_layer","=","tf",".","transpose","(","value_layer",",","[","0",",","2",",","1",",","3","]",")","# `context_layer` = [B, N, F, H]","context_layer","=","tf",".","matmul","(","attention_probs",",","value_layer",")","# `context_layer` = [B, F, N, H]","context_layer","=","tf",".","transpose","(","context_layer",",","[","0",",","2",",","1",",","3","]",")","if","do_return_2d_tensor",":","# `context_layer` = [B*F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size","*","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","else",":","# `context_layer` = [B, F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size",",","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","return","context_layer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L558-L751"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"transformer_model","parameters":"(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False)","argument_list":"","return_statement":"","docstring":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. 
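Inside `attention_layer`, masking is additive: after the 1/sqrt(size_per_head) scaling, positions whose mask is 0 receive a -10000 bias before the softmax, which drives their attention probability to effectively zero. A NumPy illustration of that step, with made-up scores:

import numpy as np

scores = np.array([2.0, 1.0, 0.5, 3.0], np.float32)  # one query's raw scores
mask = np.array([1.0, 1.0, 0.0, 1.0], np.float32)    # position 2 is padding
adder = (1.0 - mask) * -10000.0                      # 0 or -10000 per position
shifted = scores + adder
probs = np.exp(shifted - shifted.max())
probs /= probs.sum()
assert probs[2] < 1e-6   # the masked position carries (numerically) no weight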
Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.","docstring_summary":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".","docstring_tokens":["Multi","-","headed","multi","-","layer","Transformer","from","Attention","is","All","You","Need","."],"function":"def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size \/ num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on\n # the GPU\/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output","function_tokens":["def","transformer_model","(","input_tensor",",","attention_mask","=","None",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","intermediate_act_fn","=","gelu",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","initializer_range","=","0.02",",","do_return_all_layers","=","False",")",":","if","hidden_size","%","num_attention_heads","!=","0",":","raise","ValueError","(","\"The hidden size (%d) is not a multiple of the number of attention \"","\"heads 
(%d)\"","%","(","hidden_size",",","num_attention_heads",")",")","attention_head_size","=","int","(","hidden_size","\/","num_attention_heads",")","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","input_width","=","input_shape","[","2","]","# The Transformer performs sum residuals on all layers so the input needs","# to be the same as the hidden size.","if","input_width","!=","hidden_size",":","raise","ValueError","(","\"The width of the input tensor (%d) != hidden size (%d)\"","%","(","input_width",",","hidden_size",")",")","# We keep the representation as a 2D tensor to avoid re-shaping it back and","# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on","# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to","# help the optimizer.","prev_output","=","reshape_to_matrix","(","input_tensor",")","all_layer_outputs","=","[","]","for","layer_idx","in","range","(","num_hidden_layers",")",":","with","tf",".","variable_scope","(","\"layer_%d\"","%","layer_idx",")",":","layer_input","=","prev_output","with","tf",".","variable_scope","(","\"attention\"",")",":","attention_heads","=","[","]","with","tf",".","variable_scope","(","\"self\"",")",":","attention_head","=","attention_layer","(","from_tensor","=","layer_input",",","to_tensor","=","layer_input",",","attention_mask","=","attention_mask",",","num_attention_heads","=","num_attention_heads",",","size_per_head","=","attention_head_size",",","attention_probs_dropout_prob","=","attention_probs_dropout_prob",",","initializer_range","=","initializer_range",",","do_return_2d_tensor","=","True",",","batch_size","=","batch_size",",","from_seq_length","=","seq_length",",","to_seq_length","=","seq_length",")","attention_heads",".","append","(","attention_head",")","attention_output","=","None","if","len","(","attention_heads",")","==","1",":","attention_output","=","attention_heads","[","0","]","else",":","# In the case where we have other sequences, we just concatenate","# them to the self-attention head before the projection.","attention_output","=","tf",".","concat","(","attention_heads",",","axis","=","-","1",")","# Run a linear projection of `hidden_size` then add a residual","# with `layer_input`.","with","tf",".","variable_scope","(","\"output\"",")",":","attention_output","=","tf",".","layers",".","dense","(","attention_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","attention_output","=","dropout","(","attention_output",",","hidden_dropout_prob",")","attention_output","=","layer_norm","(","attention_output","+","layer_input",")","# The activation is only applied to the \"intermediate\" hidden layer.","with","tf",".","variable_scope","(","\"intermediate\"",")",":","intermediate_output","=","tf",".","layers",".","dense","(","attention_output",",","intermediate_size",",","activation","=","intermediate_act_fn",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# Down-project back to `hidden_size` then add the 
residual.","with","tf",".","variable_scope","(","\"output\"",")",":","layer_output","=","tf",".","layers",".","dense","(","intermediate_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","layer_output","=","dropout","(","layer_output",",","hidden_dropout_prob",")","layer_output","=","layer_norm","(","layer_output","+","attention_output",")","prev_output","=","layer_output","all_layer_outputs",".","append","(","layer_output",")","if","do_return_all_layers",":","final_outputs","=","[","]","for","layer_output","in","all_layer_outputs",":","final_output","=","reshape_from_matrix","(","layer_output",",","input_shape",")","final_outputs",".","append","(","final_output",")","return","final_outputs","else",":","final_output","=","reshape_from_matrix","(","prev_output",",","input_shape",")","return","final_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L754-L892"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"get_shape_list","parameters":"(tensor, expected_rank=None, name=None)","argument_list":"","return_statement":"return shape","docstring":"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.","docstring_summary":"Returns a list of the shape of tensor, preferring static dimensions.","docstring_tokens":["Returns","a","list","of","the","shape","of","tensor","preferring","static","dimensions","."],"function":"def get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape","function_tokens":["def","get_shape_list","(","tensor",",","expected_rank","=","None",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","if","expected_rank","is","not","None",":","assert_rank","(","tensor",",","expected_rank",",","name",")","shape","=","tensor",".","shape",".","as_list","(",")","non_static_indexes","=","[","]","for","(","index",",","dim",")","in","enumerate","(","shape",")",":","if","dim","is","None",":","non_static_indexes",".","append","(","index",")","if","not","non_static_indexes",":","return","shape","dyn_shape","=","tf",".","shape","(","tensor",")","for","index","in","non_static_indexes",":","shape","[","index","]","=","dyn_shape","[","index","]","return","shape"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L895-L929"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"reshape_to_matrix","parameters":"(input_tensor)","argument_list":"","return_statement":"return output_tensor","docstring":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_summary":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_tokens":["Reshapes","a",">","=","rank","2","tensor","to","a","rank","2","tensor","(","i",".","e",".","a","matrix",")","."],"function":"def reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor","function_tokens":["def","reshape_to_matrix","(","input_tensor",")",":","ndims","=","input_tensor",".","shape",".","ndims","if","ndims","<","2",":","raise","ValueError","(","\"Input tensor must have at least rank 2. 
Shape = %s\"","%","(","input_tensor",".","shape",")",")","if","ndims","==","2",":","return","input_tensor","width","=","input_tensor",".","shape","[","-","1","]","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","-","1",",","width","]",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L932-L943"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"reshape_from_matrix","parameters":"(output_tensor, orig_shape_list)","argument_list":"","return_statement":"return tf.reshape(output_tensor, orig_dims + [width])","docstring":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_summary":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_tokens":["Reshapes","a","rank","2","tensor","back","to","its","original","rank",">","=","2","tensor","."],"function":"def reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])","function_tokens":["def","reshape_from_matrix","(","output_tensor",",","orig_shape_list",")",":","if","len","(","orig_shape_list",")","==","2",":","return","output_tensor","output_shape","=","get_shape_list","(","output_tensor",")","orig_dims","=","orig_shape_list","[","0",":","-","1","]","width","=","output_shape","[","-","1","]","return","tf",".","reshape","(","output_tensor",",","orig_dims","+","[","width","]",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L946-L956"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"assert_rank","parameters":"(tensor, expected_rank, name=None)","argument_list":"","return_statement":"","docstring":"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.","docstring_summary":"Raises an exception if the tensor rank is not of the expected rank.","docstring_tokens":["Raises","an","exception","if","the","tensor","rank","is","not","of","the","expected","rank","."],"function":"def assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in 
expected_rank_dict:\n scope_name = tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))","function_tokens":["def","assert_rank","(","tensor",",","expected_rank",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","expected_rank_dict","=","{","}","if","isinstance","(","expected_rank",",","six",".","integer_types",")",":","expected_rank_dict","[","expected_rank","]","=","True","else",":","for","x","in","expected_rank",":","expected_rank_dict","[","x","]","=","True","actual_rank","=","tensor",".","shape",".","ndims","if","actual_rank","not","in","expected_rank_dict",":","scope_name","=","tf",".","get_variable_scope","(",")",".","name","raise","ValueError","(","\"For the tensor `%s` in scope `%s`, the actual rank \"","\"`%d` (shape = %s) is not equal to the expected rank `%s`\"","%","(","name",",","scope_name",",","actual_rank",",","str","(","tensor",".","shape",")",",","str","(","expected_rank",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L959-L986"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"BertConfig.__init__","parameters":"(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range","function_tokens":["def","__init__","(","self",",","vocab_size",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","16",",","initializer_range","=","0.02",")",":","self",".","vocab_size","=","vocab_size","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L34-L80"} 
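The transformer_model record above rejects any configuration where hidden_size is not a multiple of num_attention_heads, and the reshape_to_matrix / reshape_from_matrix helpers round-trip activations through a rank-2 layout so the encoder stack avoids repeated 3D-to-2D reshapes between layers. Below is a minimal NumPy sketch of those shape mechanics; the sizes are illustrative only, and this is not the TF1 implementation quoted in the records.

import numpy as np

# Illustrative sizes (not taken from the repo's configs).
batch_size, seq_length, hidden_size, num_attention_heads = 2, 8, 12, 4

# transformer_model() raises ValueError unless each head gets an equal slice.
assert hidden_size % num_attention_heads == 0
attention_head_size = hidden_size // num_attention_heads  # 3

# reshape_to_matrix(): collapse [batch, seq, hidden] -> [batch*seq, hidden].
x = np.random.rand(batch_size, seq_length, hidden_size).astype(np.float32)
as_matrix = x.reshape(-1, hidden_size)  # shape (16, 12)

# reshape_from_matrix(): restore the original leading dimensions afterwards.
restored = as_matrix.reshape(batch_size, seq_length, hidden_size)
assert restored.shape == x.shape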
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"BertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size","=","None",")","for","(","key",",","value",")","in","six",".","iteritems","(","json_object",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L83-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"BertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","tf",".","gfile",".","GFile","(","json_file",",","\"r\"",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L91-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"BertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L97-L100"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"BertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L102-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"BertModel.__init__","parameters":"(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None)","argument_list":"","return_statement":"","docstring":"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.","docstring_summary":"Constructor for BertModel.","docstring_tokens":["Constructor","for","BertModel","."],"function":"def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (self.embedding_output, self.embedding_table) = embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n\n self.sequence_output = self.all_encoder_layers[-1]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.variable_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. 
We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = tf.layers.dense(\n first_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(config.initializer_range))","function_tokens":["def","__init__","(","self",",","config",",","is_training",",","input_ids",",","input_mask","=","None",",","token_type_ids","=","None",",","use_one_hot_embeddings","=","False",",","scope","=","None",")",":","config","=","copy",".","deepcopy","(","config",")","if","not","is_training",":","config",".","hidden_dropout_prob","=","0.0","config",".","attention_probs_dropout_prob","=","0.0","input_shape","=","get_shape_list","(","input_ids",",","expected_rank","=","2",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","if","input_mask","is","None",":","input_mask","=","tf",".","ones","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","if","token_type_ids","is","None",":","token_type_ids","=","tf",".","zeros","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","with","tf",".","variable_scope","(","scope",",","default_name","=","\"bert\"",")",":","with","tf",".","variable_scope","(","\"embeddings\"",")",":","# Perform embedding lookup on the word ids.","(","self",".","embedding_output",",","self",".","embedding_table",")","=","embedding_lookup","(","input_ids","=","input_ids",",","vocab_size","=","config",".","vocab_size",",","embedding_size","=","config",".","hidden_size",",","initializer_range","=","config",".","initializer_range",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# Add positional embeddings and token type embeddings, then layer","# normalize and perform dropout.","self",".","embedding_output","=","embedding_postprocessor","(","input_tensor","=","self",".","embedding_output",",","use_token_type","=","True",",","token_type_ids","=","token_type_ids",",","token_type_vocab_size","=","config",".","type_vocab_size",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","config",".","initializer_range",",","max_position_embeddings","=","config",".","max_position_embeddings",",","dropout_prob","=","config",".","hidden_dropout_prob",")","with","tf",".","variable_scope","(","\"encoder\"",")",":","# This converts a 2D mask of shape [batch_size, seq_length] to a 3D","# mask of shape [batch_size, seq_length, seq_length] which is used","# for the attention scores.","attention_mask","=","create_attention_mask_from_input_mask","(","input_ids",",","input_mask",")","# Run the stacked transformer.","# `sequence_output` shape = [batch_size, seq_length, 
hidden_size].","self",".","all_encoder_layers","=","transformer_model","(","input_tensor","=","self",".","embedding_output",",","attention_mask","=","attention_mask",",","hidden_size","=","config",".","hidden_size",",","num_hidden_layers","=","config",".","num_hidden_layers",",","num_attention_heads","=","config",".","num_attention_heads",",","intermediate_size","=","config",".","intermediate_size",",","intermediate_act_fn","=","get_activation","(","config",".","hidden_act",")",",","hidden_dropout_prob","=","config",".","hidden_dropout_prob",",","attention_probs_dropout_prob","=","config",".","attention_probs_dropout_prob",",","initializer_range","=","config",".","initializer_range",",","do_return_all_layers","=","True",")","self",".","sequence_output","=","self",".","all_encoder_layers","[","-","1","]","# The \"pooler\" converts the encoded sequence tensor of shape","# [batch_size, seq_length, hidden_size] to a tensor of shape","# [batch_size, hidden_size]. This is necessary for segment-level","# (or segment-pair-level) classification tasks where we need a fixed","# dimensional representation of the segment.","with","tf",".","variable_scope","(","\"pooler\"",")",":","# We \"pool\" the model by simply taking the hidden state corresponding","# to the first token. We assume that this has been pre-trained","first_token_tensor","=","tf",".","squeeze","(","self",".","sequence_output","[",":",",","0",":","1",",",":","]",",","axis","=","1",")","self",".","pooled_output","=","tf",".","layers",".","dense","(","first_token_tensor",",","config",".","hidden_size",",","activation","=","tf",".","tanh",",","kernel_initializer","=","create_initializer","(","config",".","initializer_range",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L131-L232"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"BertModel.get_sequence_output","parameters":"(self)","argument_list":"","return_statement":"return self.sequence_output","docstring":"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.","docstring_summary":"Gets final hidden layer of encoder.","docstring_tokens":["Gets","final","hidden","layer","of","encoder","."],"function":"def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output","function_tokens":["def","get_sequence_output","(","self",")",":","return","self",".","sequence_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L237-L244"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/modeling.py","language":"python","identifier":"BertModel.get_embedding_output","parameters":"(self)","argument_list":"","return_statement":"return self.embedding_output","docstring":"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the 
word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.","docstring_summary":"Gets output of the embedding lookup (i.e., input to the transformer).","docstring_tokens":["Gets","output","of","the","embedding","lookup","(","i",".","e",".","input","to","the","transformer",")","."],"function":"def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output","function_tokens":["def","get_embedding_output","(","self",")",":","return","self",".","embedding_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/modeling.py#L249-L258"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"convert_example_list_for_inews","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature_list","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_example_list_for_inews(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return [InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)]\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n must_len = len(tokens_a) + 3\n extra_len = max_seq_length - must_len\n feature_list = []\n if example.text_b and extra_len > 0:\n extra_num = int((len(tokens_b) - 1) \/ extra_len) + 1\n for num in range(extra_num):\n max_len = min((num + 1) * extra_len, len(tokens_b))\n tokens_b_sub = tokens_b[num * extra_len: max_len]\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b_sub, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n else:\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n return 
feature_list","function_tokens":["def","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","[","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","]","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","must_len","=","len","(","tokens_a",")","+","3","extra_len","=","max_seq_length","-","must_len","feature_list","=","[","]","if","example",".","text_b","and","extra_len",">","0",":","extra_num","=","int","(","(","len","(","tokens_b",")","-","1",")","\/","extra_len",")","+","1","for","num","in","range","(","extra_num",")",":","max_len","=","min","(","(","num","+","1",")","*","extra_len",",","len","(","tokens_b",")",")","tokens_b_sub","=","tokens_b","[","num","*","extra_len",":","max_len","]","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b_sub",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","else",":","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","return","feature_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L236-L271"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features_for_inews","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features_for_inews(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n num_example = 0\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature_list = convert_example_list_for_inews(ex_index, example, label_list,\n max_seq_length, tokenizer)\n num_example += len(feature_list)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n for feature in feature_list:\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = 
create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"feature num: %s\", num_example)\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features_for_inews","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","num_example","=","0","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","1000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature_list","=","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","num_example","+=","len","(","feature_list",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","for","feature","in","feature_list",":","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","tf",".","logging",".","info","(","\"feature num: %s\"",",","num_example",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L274-L304"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"convert_single_example","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if 
tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return 
feature","function_tokens":["def","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","max_seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","max_seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","label_id","=","label_map","[","example",".","label","]","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","tf",".","logging",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label_id",")",")","feature","=","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",",","is_real_example","=","True",")","return","feature"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L307-L406"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n 
writer.close()","function_tokens":["def","file_based_convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L409-L436"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"file_based_input_fn_builder","parameters":"(input_file, seq_length, is_training,\n drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't 
matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","file_based_input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"label_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"is_real_example\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","}","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L439-L484"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
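file_based_input_fn_builder reads those records back, parses fixed-shape int64 features, and downcasts them to int32, because tf.Example only stores int64 while TPUs want int32. A companion sketch under the same TF2 naming assumption:

    import tensorflow as tf

    name_to_features = {
        "input_ids": tf.io.FixedLenFeature([4], tf.int64),
        "input_mask": tf.io.FixedLenFeature([4], tf.int64),
        "segment_ids": tf.io.FixedLenFeature([4], tf.int64),
        "label_ids": tf.io.FixedLenFeature([], tf.int64),
    }

    def decode(record):
        example = tf.io.parse_single_example(record, name_to_features)
        return {k: tf.cast(v, tf.int32) for k, v in example.items()}

    dataset = (tf.data.TFRecordDataset("/tmp/demo.tfrecord")
               .map(decode)
               .shuffle(100)          # only meaningful during training
               .batch(2, drop_remainder=True))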
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L487-L501"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, 
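The _truncate_seq_pair heuristic above always pops a trailing token from whichever sequence is currently longer, so a short sequence is never starved. Exercising it on toy data makes that concrete:

    def truncate_pair(tokens_a, tokens_b, max_length):
        # Same loop as _truncate_seq_pair: trim the longer side, one token
        # at a time, until the pair fits.
        while len(tokens_a) + len(tokens_b) > max_length:
            (tokens_a if len(tokens_a) > len(tokens_b) else tokens_b).pop()

    a, b = list("abcdefg"), list("xy")
    truncate_pair(a, b, 6)
    # a -> ['a', 'b', 'c', 'd'], b -> ['x', 'y']: only the longer side shrank.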
probabilities)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use model.get_sequence_output()","# instead.","output_layer","=","model",".","get_pooled_output","(",")","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L504-L546"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = 
None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","num_labels",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_real_example","=","None","if","\"is_real_example\"","in","features",":","is_real_example","=","tf",".","cast","(","features","[","\"is_real_example\"","]",",","dtype","=","tf",".","float32",")","else",":","is_real_example","=","tf",".","ones","(","tf",".","shape","(","label_ids",")",",","dtype","=","tf",".","float32",")","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",",","is_real_example",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","label_ids",",","predictions","=","predictions",",","weights","=","is_real_example",")","loss","=","tf",".","metrics",".","mean","(","values","=","per_example_loss",",","weights","=","is_real_example",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits",",","is_real_example","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L549-L638"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",",","is_training",",","drop_remainder",")",":","all_input_ids","=","[","]","all_input_mask","=","[","]","all_segment_ids","=","[","]","all_label_ids","=","[","]","for","feature","in","features",":","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_segment_ids",".","append","(","feature",".","segment_ids",")","all_label_ids",".","append","(","feature",".","label_id",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"segment_ids\"",":","tf",".","constant","(","all_segment_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"label_ids\"",":","tf",".","constant","(","all_label_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","}",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L643-L692"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return features","docstring":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_summary":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","list","of","InputFeatures","."],"function":"def convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in 
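input_fn_builder keeps all features in memory and embeds them in the graph as constants via Dataset.from_tensor_slices, which, as its own comment warns, is demo-only and does not scale. Compressed to its essentials:

    import tensorflow as tf

    d = tf.data.Dataset.from_tensor_slices({
        "input_ids": tf.constant([[101, 102], [101, 102]], dtype=tf.int32),
        "label_ids": tf.constant([0, 1], dtype=tf.int32),
    }).shuffle(100).batch(2, drop_remainder=True)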
enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","features",".","append","(","feature",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier.py#L697-L710"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/optimization.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. 
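create_optimizer combines linear warmup with a power-1.0 polynomial (i.e. linear) decay to zero. Written out as a plain function of the step counter; lr_at is an illustrative name:

    def lr_at(step, init_lr, num_train_steps, num_warmup_steps):
        if num_warmup_steps and step < num_warmup_steps:
            # Linear warmup: global_step / num_warmup_steps * init_lr.
            return init_lr * step / num_warmup_steps
        # Linear decay from init_lr down to 0 at num_train_steps.
        frac = min(step, num_train_steps) / num_train_steps
        return init_lr * (1.0 - frac)

    # With init_lr=2e-5, 1000 train steps, 100 warmup steps:
    # lr_at(50, 2e-5, 1000, 100)   -> 1e-5 (halfway through warmup)
    # lr_at(1000, 2e-5, 1000, 100) -> 0.0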
I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","AdamWeightDecayOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/optimization.py#L25-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/optimization.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + 
tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
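The update in apply_gradients is Adam without bias correction, and the weight-decay term is added to the update itself rather than to the gradient, which keeps decay from contaminating the m/v moment estimates. A scalar-style sketch of one step:

    def adamw_step(param, grad, m, v, lr, wd, b1=0.9, b2=0.999, eps=1e-6):
        m = b1 * m + (1.0 - b1) * grad              # next_m
        v = b2 * v + (1.0 - b2) * grad * grad       # next_v
        update = m / (v ** 0.5 + eps)               # Adam direction
        update += wd * param                        # decoupled weight decay
        return param - lr * update, m, v

    p, m, v = 1.0, 0.0, 0.0
    p, m, v = adamw_step(p, grad=0.5, m=m, v=v, lr=1e-3, wd=0.01)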
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/optimization.py#L108-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/optimization.py#L159-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/optimization.py#L169-L174"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py","language":"python","identifier":"write_instance_to_example_files","parameters":"(instances, tokenizer, 
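_do_use_weight_decay disables decay for any variable whose name re.search-matches one of the exclude patterns, which is how LayerNorm and bias parameters escape decay:

    import re

    exclude = ["LayerNorm", "layer_norm", "bias"]

    def use_decay(name):
        return not any(re.search(r, name) for r in exclude)

    use_decay("bert/encoder/layer_0/output/dense/kernel")      # True
    use_decay("bert/encoder/layer_0/output/LayerNorm/gamma")   # False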
max_seq_length,\n max_predictions_per_seq, output_files)","argument_list":"","return_statement":"","docstring":"Create TF example files from `TrainingInstance`s.","docstring_summary":"Create TF example files from `TrainingInstance`s.","docstring_tokens":["Create","TF","example","files","from","TrainingInstance","s","."],"function":"def write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", 
total_written)","function_tokens":["def","write_instance_to_example_files","(","instances",",","tokenizer",",","max_seq_length",",","max_predictions_per_seq",",","output_files",")",":","writers","=","[","]","for","output_file","in","output_files",":","writers",".","append","(","tf",".","python_io",".","TFRecordWriter","(","output_file",")",")","writer_index","=","0","total_written","=","0","for","(","inst_index",",","instance",")","in","enumerate","(","instances",")",":","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","tokens",")","input_mask","=","[","1","]","*","len","(","input_ids",")","segment_ids","=","list","(","instance",".","segment_ids",")","assert","len","(","input_ids",")","<=","max_seq_length","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","masked_lm_positions","=","list","(","instance",".","masked_lm_positions",")","masked_lm_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","masked_lm_labels",")","masked_lm_weights","=","[","1.0","]","*","len","(","masked_lm_ids",")","while","len","(","masked_lm_positions",")","<","max_predictions_per_seq",":","masked_lm_positions",".","append","(","0",")","masked_lm_ids",".","append","(","0",")","masked_lm_weights",".","append","(","0.0",")","next_sentence_label","=","1","if","instance",".","is_random_next","else","0","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","segment_ids",")","features","[","\"masked_lm_positions\"","]","=","create_int_feature","(","masked_lm_positions",")","features","[","\"masked_lm_ids\"","]","=","create_int_feature","(","masked_lm_ids",")","features","[","\"masked_lm_weights\"","]","=","create_float_feature","(","masked_lm_weights",")","features","[","\"next_sentence_labels\"","]","=","create_int_feature","(","[","next_sentence_label","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writers","[","writer_index","]",".","write","(","tf_example",".","SerializeToString","(",")",")","writer_index","=","(","writer_index","+","1",")","%","len","(","writers",")","total_written","+=","1","if","inst_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","instance",".","tokens","]",")",")","for","feature_name","in","features",".","keys","(",")",":","feature","=","features","[","feature_name","]","values","=","[","]","if","feature",".","int64_list",".","value",":","values","=","feature",".","int64_list",".","value","elif","feature",".","float_list",".","value",":","values","=","feature",".","float_list",".","value","tf",".","logging",".","info","(","\"%s: %s\"","%","(","feature_name",",","\" \"",".","join","(","[","str","(","x",")","for","x","in","values","]",")",")",")","for","writer","in","writers",":","writer",".","close","(",")","tf",".","logging",".","info","(","\"Wrote %d total 
instances\"",",","total_written",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py#L96-L166"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py","language":"python","identifier":"create_training_instances","parameters":"(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng)","argument_list":"","return_statement":"return instances","docstring":"Create `TrainingInstance`s from raw text.","docstring_summary":"Create `TrainingInstance`s from raw text.","docstring_tokens":["Create","TrainingInstance","s","from","raw","text","."],"function":"def create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng))\n\n rng.shuffle(instances)\n return instances","function_tokens":["def","create_training_instances","(","input_files",",","tokenizer",",","max_seq_length",",","dupe_factor",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","rng",")",":","all_documents","=","[","[","]","]","# Input file format:","# (1) One sentence per line. These should ideally be actual sentences, not","# entire paragraphs or arbitrary spans of text. (Because we use the","# sentence boundaries for the \"next sentence prediction\" task).","# (2) Blank lines between documents. 
Document boundaries are needed so","# that the \"next sentence prediction\" task doesn't span between documents.","for","input_file","in","input_files",":","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","# Empty lines are used as document delimiters","if","not","line",":","all_documents",".","append","(","[","]",")","tokens","=","tokenizer",".","tokenize","(","line",")","if","tokens",":","all_documents","[","-","1","]",".","append","(","tokens",")","# Remove empty documents","all_documents","=","[","x","for","x","in","all_documents","if","x","]","rng",".","shuffle","(","all_documents",")","vocab_words","=","list","(","tokenizer",".","vocab",".","keys","(",")",")","instances","=","[","]","for","_","in","range","(","dupe_factor",")",":","for","document_index","in","range","(","len","(","all_documents",")",")",":","instances",".","extend","(","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",")","rng",".","shuffle","(","instances",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py#L179-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document","parameters":"(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. 
Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document):\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances","function_tokens":["def","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. 
However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","instances","=","[","]","current_chunk","=","[","]","current_length","=","0","i","=","0","while","i","<","len","(","document",")",":","segment","=","document","[","i","]","current_chunk",".","append","(","segment",")","current_length","+=","len","(","segment",")","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","if","current_chunk",":","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","tokens_b","=","[","]","# Random next","is_random_next","=","False","if","len","(","current_chunk",")","==","1","or","rng",".","random","(",")","<","0.5",":","is_random_next","=","True","target_b_length","=","target_seq_length","-","len","(","tokens_a",")","# This should rarely go for more than one iteration for large","# corpora. 
However, just to be careful, we try to make sure that","# the random document is not the same as the document","# we're processing.","for","_","in","range","(","10",")",":","random_document_index","=","rng",".","randint","(","0",",","len","(","all_documents",")","-","1",")","if","random_document_index","!=","document_index",":","break","random_document","=","all_documents","[","random_document_index","]","random_start","=","rng",".","randint","(","0",",","len","(","random_document",")","-","1",")","for","j","in","range","(","random_start",",","len","(","random_document",")",")",":","tokens_b",".","extend","(","random_document","[","j","]",")","if","len","(","tokens_b",")",">=","target_b_length",":","break","# We didn't actually use these segments so we \"put them back\" so","# they don't go to waste.","num_unused_segments","=","len","(","current_chunk",")","-","a_end","i","-=","num_unused_segments","# Actual next","else",":","is_random_next","=","False","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","current_length","=","0","i","+=","1","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py#L223-L335"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py","language":"python","identifier":"create_masked_lm_predictions","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # 
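create_instances_from_document assembles each pretraining sequence as [CLS] A [SEP] B [SEP]: segment id 0 covers [CLS], the A tokens and the first [SEP]; segment id 1 covers the B tokens and the final [SEP]:

    tokens_a, tokens_b = ["深", "度"], ["学", "习"]
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    assert len(tokens) == len(segment_ids)   # 7 tokens, 7 segment ids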
corresponding to an original word. When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. When a word has been split into","# WordPieces, the first token does not have any marker and any subsequence","# tokens are prefixed with ##. 
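The 80/10/10 replacement rule above is easy to check in isolation; a sketch where vocab_words is a tiny stand-in vocabulary, not a real BERT vocab file:

import random

rng = random.Random(0)
vocab_words = ["the", "dog", "park", "run", "##ing"]  # stand-in vocabulary
tokens = ["[CLS]", "play", "##ing", "outside", "[SEP]"]

def choose_replacement(original):
    # 80% [MASK], 10% keep the original token, 10% a random vocab word.
    if rng.random() < 0.8:
        return "[MASK]"
    if rng.random() < 0.5:
        return original
    return vocab_words[rng.randint(0, len(vocab_words) - 1)]

# With whole-word masking, "play" and "##ing" form one candidate and are
# masked together, though each position is still predicted independently.
for index in (1, 2):
    print(index, tokens[index], "->", choose_replacement(tokens[index]))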
So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","output_tokens","=","list","(","tokens",")","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","masked_token","=","tokens","[","index","]","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py#L342-L415"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py","language":"python","identifier":"truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_num_tokens, rng)","argument_list":"","return_statement":"","docstring":"Truncates a pair of sequences to a maximum sequence length.","docstring_summary":"Truncates a pair of sequences to a maximum sequence length.","docstring_tokens":["Truncates","a","pair","of","sequences","to","a","maximum","sequence","length","."],"function":"def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > 
len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()","function_tokens":["def","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")",":","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_num_tokens",":","break","trunc_tokens","=","tokens_a","if","len","(","tokens_a",")",">","len","(","tokens_b",")","else","tokens_b","assert","len","(","trunc_tokens",")",">=","1","# We want to sometimes truncate from the front and sometimes from the","# back to add more randomness and avoid biases.","if","rng",".","random","(",")","<","0.5",":","del","trunc_tokens","[","0","]","else",":","trunc_tokens",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/create_pretraining_data.py#L418-L433"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_ner.py","language":"python","identifier":"InputExample.__init__","parameters":"(self, guid, text, label=None)","argument_list":"","return_statement":"","docstring":"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.","docstring_summary":"Constructs a InputExample.","docstring_tokens":["Constructs","a","InputExample","."],"function":"def __init__(self, guid, text, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. 
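The pair-truncation loop shown above always shrinks the longer side, randomly from the front or the back; a standalone sketch of the same behavior:

import random

def truncate_pair(tokens_a, tokens_b, max_num_tokens, rng):
    # Trim one token at a time from the longer list until the pair fits.
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        trunc = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        if rng.random() < 0.5:
            del trunc[0]      # drop from the front
        else:
            trunc.pop()       # drop from the back

a = list("abcdefgh")
b = list("xyz")
truncate_pair(a, b, 6, random.Random(1))
print(a, b)  # total length is now <= 6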
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.label = label","function_tokens":["def","__init__","(","self",",","guid",",","text",",","label","=","None",")",":","self",".","guid","=","guid","self",".","text","=","text","self",".","label","=","label"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_ner.py#L123-L135"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_ner.py#L152-L154"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_ner.py#L156-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_ner.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_ner.py#L160-L162"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_ner.py","language":"python","identifier":"DataProcessor._read_data","parameters":"(cls, 
input_file)","argument_list":"","return_statement":"","docstring":"Reads a BIO data.","docstring_summary":"Reads a BIO data.","docstring_tokens":["Reads","a","BIO","data","."],"function":"def _read_data(cls, input_file):\n \"\"\"Reads a BIO data.\"\"\"\n with open(input_file) as f:\n lines = []\n words = []\n labels = []\n for line in f:\n contends = line.strip()\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n if contends.startswith(\"-DOCSTART-\"):\n words.append('')\n continue\n if len(contends) == 0 and words[-1] == '.':\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append([l, w])\n words = []\n labels = []\n continue\n if len(contends) == 0:\n continue\n words.append(word)\n labels.append(label)\n return lines","function_tokens":["def","_read_data","(","cls",",","input_file",")",":","with","open","(","input_file",")","as","f",":","lines","=","[","]","words","=","[","]","labels","=","[","]","for","line","in","f",":","contends","=","line",".","strip","(",")","word","=","line",".","strip","(",")",".","split","(","' '",")","[","0","]","label","=","line",".","strip","(",")",".","split","(","' '",")","[","-","1","]","if","contends",".","startswith","(","\"-DOCSTART-\"",")",":","words",".","append","(","''",")","continue","if","len","(","contends",")","==","0","and","words","[","-","1","]","==","'.'",":","l","=","' '",".","join","(","[","label","for","label","in","labels","if","len","(","label",")",">","0","]",")","w","=","' '",".","join","(","[","word","for","word","in","words","if","len","(","word",")",">","0","]",")","lines",".","append","(","[","l",",","w","]",")","words","=","[","]","labels","=","[","]","continue","if","len","(","contends",")","==","0",":","continue","words",".","append","(","word",")","labels",".","append","(","label",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_ner.py#L165-L189"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/extract_features.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
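The BIO reader in _read_data above splits each line on spaces, taking the first field as the word and the last as the label, and flushes a sentence at blank lines; a sketch on an in-memory file (the sample sentence is illustrative):

import io

sample = io.StringIO(
    "John B-PER\n"
    "lives O\n"
    "in O\n"
    "Boston B-LOC\n"
    ". O\n"
    "\n"
)

words, labels, lines = [], [], []
for line in sample:
    contends = line.strip()
    if not contends:
        if words and words[-1] == ".":
            lines.append([" ".join(labels), " ".join(words)])
            words, labels = [], []
        continue
    words.append(contends.split(" ")[0])
    labels.append(contends.split(" ")[-1])

print(lines)  # [['B-PER O O B-LOC O', 'John lives in Boston .']]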
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(\n all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",")",":","all_unique_ids","=","[","]","all_input_ids","=","[","]","all_input_mask","=","[","]","all_input_type_ids","=","[","]","for","feature","in","features",":","all_unique_ids",".","append","(","feature",".","unique_id",")","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_input_type_ids",".","append","(","feature",".","input_type_ids",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"unique_ids\"",":","tf",".","constant","(","all_unique_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_type_ids\"",":","tf",".","constant","(","all_input_type_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","}",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","False",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/extract_features.py#L100-L145"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/extract_features.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = 
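The in-memory Dataset pattern above (from input_fn_builder) can be run directly; a toy sketch assuming TensorFlow 2.x with eager execution, where the shapes and ids are made up:

import tensorflow as tf

num_examples, seq_length, batch_size = 4, 8, 2

d = tf.data.Dataset.from_tensor_slices({
    "unique_ids": tf.constant(list(range(num_examples)), dtype=tf.int32),
    "input_ids": tf.constant(
        [[1] * seq_length] * num_examples, dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
# Two batches of two examples each; everything lives in the graph as
# constants, which is why this pattern does not scale to large datasets.
for batch in d:
    print(batch["unique_ids"].shape, batch["input_ids"].shape)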
modeling.BertModel(\n config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map,\n initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\n \"unique_id\": unique_ids,\n }\n\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","layer_indexes",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","input_type_ids","=","features","[","\"input_type_ids\"","]","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","False",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","input_type_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","if","mode","!=","tf",".","estimator",".","ModeKeys",".","PREDICT",":","raise","ValueError","(","\"Only PREDICT modes are supported: %s\"","%","(","mode",")",")","tvars","=","tf",".","trainable_variables","(",")","scaffold_fn","=","None","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","all_layers","=","model",".","get_all_encoder_layers","(",")","predictions","=","{","\"unique_id\"",":","unique_ids",",","}","for","(","i",",","layer_index",")","in","enumerate","(","layer_indexes",")",":","predictions","[","\"layer_output_%d\"","%","i","]","=","all_layers","[","layer_index","]","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/extract_features.py#L148-L207"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/extract_features.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, seq_length, tokenizer)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
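The "- 3" / "- 2" accounting in convert_examples_to_features above simply reserves room for the special tokens before truncation; a quick standalone check:

seq_length = 16
tokens_a = ["tok"] * 20
tokens_b = ["tok"] * 5

if tokens_b:
    budget = seq_length - 3   # room for [CLS], [SEP], [SEP]
else:
    budget = seq_length - 2   # room for [CLS], [SEP]

# After truncating the pair to `budget`, adding the special tokens lands
# exactly on seq_length.
while len(tokens_a) + len(tokens_b) > budget:
    (tokens_a if len(tokens_a) > len(tokens_b) else tokens_b).pop()

total = 1 + len(tokens_a) + 1 + len(tokens_b) + 1
assert total == seq_length
print(len(tokens_a), len(tokens_b), total)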
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(\n InputFeatures(\n unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","input_type_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","input_type_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
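Zero-padding to a fixed length, as above, keeps input_ids, input_mask and input_type_ids aligned; a minimal sketch with made-up ids:

seq_length = 8
input_ids = [101, 7592, 2088, 102]      # made-up ids for [CLS] hello world [SEP]
input_mask = [1] * len(input_ids)       # 1 marks a real token
input_type_ids = [0] * len(input_ids)

while len(input_ids) < seq_length:
    input_ids.append(0)
    input_mask.append(0)                # 0 marks padding, never attended to
    input_type_ids.append(0)

assert len(input_ids) == len(input_mask) == len(input_type_ids) == seq_length
print(input_mask)  # [1, 1, 1, 1, 0, 0, 0, 0]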
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","input_type_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","seq_length","assert","len","(","input_mask",")","==","seq_length","assert","len","(","input_type_ids",")","==","seq_length","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","example",".","unique_id",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"input_type_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_type_ids","]",")",")","features",".","append","(","InputFeatures","(","unique_id","=","example",".","unique_id",",","tokens","=","tokens",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","input_type_ids","=","input_type_ids",")",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/extract_features.py#L210-L299"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/extract_features.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. 
This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/extract_features.py#L302-L316"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/extract_features.py","language":"python","identifier":"read_examples","parameters":"(input_file)","argument_list":"","return_statement":"return examples","docstring":"Read a list of `InputExample`s from an input file.","docstring_summary":"Read a list of `InputExample`s from an input file.","docstring_tokens":["Read","a","list","of","InputExample","s","from","an","input","file","."],"function":"def read_examples(input_file):\n \"\"\"Read a list of `InputExample`s from an input file.\"\"\"\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(\n InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))\n unique_id += 1\n return examples","function_tokens":["def","read_examples","(","input_file",")",":","examples","=","[","]","unique_id","=","0","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","text_a","=","None","text_b","=","None","m","=","re",".","match","(","r\"^(.*) \\|\\|\\| (.*)$\"",",","line",")","if","m","is","None",":","text_a","=","line","else",":","text_a","=","m",".","group","(","1",")","text_b","=","m",".","group","(","2",")","examples",".","append","(","InputExample","(","unique_id","=","unique_id",",","text_a","=","text_a",",","text_b","=","text_b",")",")","unique_id","+=","1","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/extract_features.py#L319-L340"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"create_model","parameters":"(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle):\n \"\"\"Creates a classification model.\"\"\"\n tags = set()\n if is_training:\n tags.add(\"train\")\n bert_module = hub.Module(bert_hub_module_handle, tags=tags, 
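read_examples above accepts either a single sentence per line or two sentences separated by " ||| "; the regex behavior can be checked on plain strings:

import re

for line in ["single sentence", "first half ||| second half"]:
    m = re.match(r"^(.*) \|\|\| (.*)$", line)
    if m is None:
        text_a, text_b = line, None
    else:
        text_a, text_b = m.group(1), m.group(2)
    print(repr(text_a), repr(text_b))
# 'single sentence' None
# 'first half' 'second half'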
trainable=True)\n bert_inputs = dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use\n # bert_outputs[\"sequence_output\"] instead.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)","function_tokens":["def","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","bert_hub_module_handle",")",":","tags","=","set","(",")","if","is_training",":","tags",".","add","(","\"train\"",")","bert_module","=","hub",".","Module","(","bert_hub_module_handle",",","tags","=","tags",",","trainable","=","True",")","bert_inputs","=","dict","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",")","bert_outputs","=","bert_module","(","inputs","=","bert_inputs",",","signature","=","\"tokens\"",",","as_dict","=","True",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use","# bert_outputs[\"sequence_output\"] instead.","output_layer","=","bert_outputs","[","\"pooled_output\"","]","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 
dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier_with_tfhub.py#L37-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"model_fn_builder","parameters":"(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,\n bert_hub_module_handle)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions={\"probabilities\": probabilities})\n else:\n raise ValueError(\n \"Only TRAIN, EVAL and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return 
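The loss block in create_model above is standard softmax cross-entropy against a one-hot label; the same arithmetic in NumPy, with toy logits for a two-class problem:

import numpy as np

logits = np.array([[2.0, 0.5], [0.2, 1.5]])   # toy [batch, num_labels] scores
labels = np.array([0, 1])

log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
one_hot = np.eye(logits.shape[-1])[labels]

per_example_loss = -(one_hot * log_probs).sum(axis=-1)
loss = per_example_loss.mean()
print(per_example_loss, loss)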
model_fn","function_tokens":["def","model_fn_builder","(","num_labels",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","bert_hub_module_handle",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","bert_hub_module_handle",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","label_ids",",","predictions",")","loss","=","tf",".","metrics",".","mean","(","per_example_loss",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",")","else",":","raise","ValueError","(","\"Only TRAIN, EVAL and PREDICT modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier_with_tfhub.py#L87-L143"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/roberta_wwm_large_ext\/run_classifier_with_tfhub.py","language":"python","identifier":"create_tokenizer_from_hub_module","parameters":"(bert_hub_module_handle)","argument_list":"","return_statement":"return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","docstring":"Get the vocab file and casing info from the Hub module.","docstring_summary":"Get the vocab file and casing info from the Hub module.","docstring_tokens":["Get","the","vocab","file","and","casing","info","from","the","Hub","module","."],"function":"def 
create_tokenizer_from_hub_module(bert_hub_module_handle):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_hub_module_handle)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","function_tokens":["def","create_tokenizer_from_hub_module","(","bert_hub_module_handle",")",":","with","tf",".","Graph","(",")",".","as_default","(",")",":","bert_module","=","hub",".","Module","(","bert_hub_module_handle",")","tokenization_info","=","bert_module","(","signature","=","\"tokenization_info\"",",","as_dict","=","True",")","with","tf",".","Session","(",")","as","sess",":","vocab_file",",","do_lower_case","=","sess",".","run","(","[","tokenization_info","[","\"vocab_file\"","]",",","tokenization_info","[","\"do_lower_case\"","]","]",")","return","tokenization",".","FullTokenizer","(","vocab_file","=","vocab_file",",","do_lower_case","=","do_lower_case",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/roberta_wwm_large_ext\/run_classifier_with_tfhub.py#L146-L155"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_pretraining.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.trainable_variables()\n\n 
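The pre-training model_fn above (continuing below) optimizes the sum of the masked-LM and next-sentence losses; a NumPy sketch of that reduction, with illustrative per-example values standing in for the real graph tensors:

import numpy as np

masked_lm_example_loss = np.array([1.2, 1.5, 0.0])
masked_lm_weights = np.array([1.0, 1.0, 0.0])        # 0.0 marks a padded slot
next_sentence_example_loss = np.array([0.6, 0.8])

masked_lm_loss = ((masked_lm_example_loss * masked_lm_weights).sum()
                  / masked_lm_weights.sum())
next_sentence_loss = next_sentence_example_loss.mean()
total_loss = masked_lm_loss + next_sentence_loss     # the quantity optimized
print(total_loss)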
initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features 
***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","masked_lm_positions","=","features","[","\"masked_lm_positions\"","]","masked_lm_ids","=","features","[","\"masked_lm_ids\"","]","masked_lm_weights","=","features","[","\"masked_lm_weights\"","]","next_sentence_labels","=","features","[","\"next_sentence_labels\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","(","masked_lm_loss",",","masked_lm_example_loss",",","masked_lm_log_probs",")","=","get_masked_lm_output","(","bert_config",",","model",".","get_sequence_output","(",")",",","model",".","get_embedding_table","(",")",",","masked_lm_positions",",","masked_lm_ids",",","masked_lm_weights",")","(","next_sentence_loss",",","next_sentence_example_loss",",","next_sentence_log_probs",")","=","get_next_sentence_output","(","bert_config",",","model",".","get_pooled_output","(",")",",","next_sentence_labels",")","total_loss","=","masked_lm_loss","+","next_sentence_loss","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels",")",":","\"\"\"Computes the loss and accuracy of the 
model.\"\"\"","masked_lm_log_probs","=","tf",".","reshape","(","masked_lm_log_probs",",","[","-","1",",","masked_lm_log_probs",".","shape","[","-","1","]","]",")","masked_lm_predictions","=","tf",".","argmax","(","masked_lm_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","masked_lm_example_loss","=","tf",".","reshape","(","masked_lm_example_loss",",","[","-","1","]",")","masked_lm_ids","=","tf",".","reshape","(","masked_lm_ids",",","[","-","1","]",")","masked_lm_weights","=","tf",".","reshape","(","masked_lm_weights",",","[","-","1","]",")","masked_lm_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","masked_lm_ids",",","predictions","=","masked_lm_predictions",",","weights","=","masked_lm_weights",")","masked_lm_mean_loss","=","tf",".","metrics",".","mean","(","values","=","masked_lm_example_loss",",","weights","=","masked_lm_weights",")","next_sentence_log_probs","=","tf",".","reshape","(","next_sentence_log_probs",",","[","-","1",",","next_sentence_log_probs",".","shape","[","-","1","]","]",")","next_sentence_predictions","=","tf",".","argmax","(","next_sentence_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","next_sentence_labels","=","tf",".","reshape","(","next_sentence_labels",",","[","-","1","]",")","next_sentence_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","next_sentence_labels",",","predictions","=","next_sentence_predictions",")","next_sentence_mean_loss","=","tf",".","metrics",".","mean","(","values","=","next_sentence_example_loss",")","return","{","\"masked_lm_accuracy\"",":","masked_lm_accuracy",",","\"masked_lm_loss\"",":","masked_lm_mean_loss",",","\"next_sentence_accuracy\"",":","next_sentence_accuracy",",","\"next_sentence_loss\"",":","next_sentence_mean_loss",",","}","eval_metrics","=","(","metric_fn",",","[","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and EVAL modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_pretraining.py#L109-L237"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_pretraining.py","language":"python","identifier":"get_masked_lm_output","parameters":"(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights)","argument_list":"","return_statement":"return (loss, per_example_loss, log_probs)","docstring":"Get loss and log probs for the masked LM.","docstring_summary":"Get loss and log probs for the masked LM.","docstring_tokens":["Get","loss","and","log","probs","for","the","masked","LM","."],"function":"def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls\/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with 
tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator \/ denominator\n\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_masked_lm_output","(","bert_config",",","input_tensor",",","output_weights",",","positions",",","label_ids",",","label_weights",")",":","input_tensor","=","gather_indexes","(","input_tensor",",","positions",")","with","tf",".","variable_scope","(","\"cls\/predictions\"",")",":","# We apply one more non-linear transformation before the output layer.","# This matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"transform\"",")",":","input_tensor","=","tf",".","layers",".","dense","(","input_tensor",",","units","=","bert_config",".","hidden_size",",","activation","=","modeling",".","get_activation","(","bert_config",".","hidden_act",")",",","kernel_initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","input_tensor","=","modeling",".","layer_norm","(","input_tensor",")","# The output weights are the same as the input embeddings, but there is","# an output-only bias for each token.","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","bert_config",".","vocab_size","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","label_ids","=","tf",".","reshape","(","label_ids",",","[","-","1","]",")","label_weights","=","tf",".","reshape","(","label_weights",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","label_ids",",","depth","=","bert_config",".","vocab_size",",","dtype","=","tf",".","float32",")","# The `positions` tensor might be zero-padded (if the sequence is too","# short to have the maximum number of predictions). 
The `label_weights`","# tensor has a value of 1.0 for every real prediction and 0.0 for the","# padding predictions.","per_example_loss","=","-","tf",".","reduce_sum","(","log_probs","*","one_hot_labels",",","axis","=","[","-","1","]",")","numerator","=","tf",".","reduce_sum","(","label_weights","*","per_example_loss",")","denominator","=","tf",".","reduce_sum","(","label_weights",")","+","1e-5","loss","=","numerator","\/","denominator","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_pretraining.py#L240-L282"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_pretraining.py","language":"python","identifier":"get_next_sentence_output","parameters":"(bert_config, input_tensor, labels)","argument_list":"","return_statement":"","docstring":"Get loss and log probs for the next sentence prediction.","docstring_summary":"Get loss and log probs for the next sentence prediction.","docstring_tokens":["Get","loss","and","log","probs","for","the","next","sentence","prediction","."],"function":"def get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls\/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_next_sentence_output","(","bert_config",",","input_tensor",",","labels",")",":","# Simple binary classification. Note that 0 is \"next sentence\" and 1 is","# \"random sentence\". 
This weight matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"cls\/seq_relationship\"",")",":","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","shape","=","[","2",",","bert_config",".","hidden_size","]",",","initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","labels","=","tf",".","reshape","(","labels",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","2",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_pretraining.py#L285-L305"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_pretraining.py","language":"python","identifier":"gather_indexes","parameters":"(sequence_tensor, positions)","argument_list":"","return_statement":"return output_tensor","docstring":"Gathers the vectors at the specific positions over a minibatch.","docstring_summary":"Gathers the vectors at the specific positions over a minibatch.","docstring_tokens":["Gathers","the","vectors","at","the","specific","positions","over","a","minibatch","."],"function":"def gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return 
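The `get_next_sentence_output` record above is a plain two-way softmax classifier over the pooled `[CLS]` output, with label 0 meaning "is the next sentence" and label 1 meaning "random sentence". A tiny NumPy sketch of the per-example and mean loss it returns (illustrative only):

```python
import numpy as np

def next_sentence_loss(log_probs, labels):
    # Label 0 = "is next sentence", label 1 = "random sentence".
    one_hot = np.eye(2)[labels.reshape(-1)]
    per_example = -(one_hot * log_probs).sum(axis=-1)
    return per_example, float(per_example.mean())

log_probs = np.log(np.array([[0.9, 0.1], [0.4, 0.6]]))
per_example, mean = next_sentence_loss(log_probs, np.array([0, 1]))
print(per_example, mean)  # [-log(0.9), -log(0.6)] and their mean
```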
output_tensor","function_tokens":["def","gather_indexes","(","sequence_tensor",",","positions",")",":","sequence_shape","=","modeling",".","get_shape_list","(","sequence_tensor",",","expected_rank","=","3",")","batch_size","=","sequence_shape","[","0","]","seq_length","=","sequence_shape","[","1","]","width","=","sequence_shape","[","2","]","flat_offsets","=","tf",".","reshape","(","tf",".","range","(","0",",","batch_size",",","dtype","=","tf",".","int32",")","*","seq_length",",","[","-","1",",","1","]",")","flat_positions","=","tf",".","reshape","(","positions","+","flat_offsets",",","[","-","1","]",")","flat_sequence_tensor","=","tf",".","reshape","(","sequence_tensor",",","[","batch_size","*","seq_length",",","width","]",")","output_tensor","=","tf",".","gather","(","flat_sequence_tensor",",","flat_positions",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_pretraining.py#L308-L321"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_pretraining.py","language":"python","identifier":"input_fn_builder","parameters":"(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_files",",","max_seq_length",",","max_predictions_per_seq",",","is_training",",","num_cpu_threads","=","4",")",":","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"masked_lm_positions\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_ids\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_weights\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","float32",")",",","\"next_sentence_labels\"",":","tf",".","FixedLenFeature","(","[","1","]",",","tf",".","int64",")",",","}","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","if","is_training",":","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","tf",".","constant","(","input_files",")",")","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","len","(","input_files",")",")","# `cycle_length` is the number of parallel files that get read.","cycle_length","=","min","(","num_cpu_threads",",","len","(","input_files",")",")","# `sloppy` mode means that the interleaving is not exact. This adds","# even more randomness to the training pipeline.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","parallel_interleave","(","tf",".","data",".","TFRecordDataset",",","sloppy","=","is_training",",","cycle_length","=","cycle_length",")",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","else",":","d","=","tf",".","data",".","TFRecordDataset","(","input_files",")","# Since we evaluate for a fixed number of steps we don't want to encounter","# out-of-range exceptions.","d","=","d",".","repeat","(",")","# We must `drop_remainder` on training because the TPU requires fixed","# size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU","# and we *don't* want to drop the remainder, otherwise we wont cover","# every sample.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","num_parallel_batches","=","num_cpu_threads",",","drop_remainder","=","True",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_pretraining.py#L324-L388"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_pretraining.py","language":"python","identifier":"_decode_record","parameters":"(record, name_to_features)","argument_list":"","return_statement":"return example","docstring":"Decodes a record to a TensorFlow example.","docstring_summary":"Decodes a record to a TensorFlow example.","docstring_tokens":["Decodes","a","record","to","a","TensorFlow","example","."],"function":"def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example","function_tokens":["def","_decode_record","(","record",",","name_to_features",")",":","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_pretraining.py#L391-L403"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"read_squad_examples","parameters":"(input_file, is_training)","argument_list":"","return_statement":"return examples","docstring":"Read a SQuAD json file into a list of SquadExample.","docstring_summary":"Read a SQuAD json file into a list of SquadExample.","docstring_tokens":["Read","a","SQuAD","json","file","into","a","list","of","SquadExample","."],"function":"def read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n 
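The `input_fn_builder` record above always batches with `drop_remainder=True` because a TPU requires static shapes, and it makes eval safe by repeating the dataset instead of keeping partial batches. A pure-Python sketch of what dropping the remainder means (illustrative; the real pipeline uses `tf.data`):

```python
def batch(records, batch_size, drop_remainder):
    """Yield fixed-size batches; optionally drop the short final batch."""
    for i in range(0, len(records), batch_size):
        b = records[i:i + batch_size]
        if drop_remainder and len(b) < batch_size:
            return  # TPUs need static shapes, so the partial batch is dropped
        yield b

print(list(batch(list(range(7)), 3, drop_remainder=True)))   # [[0,1,2], [3,4,5]]
print(list(batch(list(range(7)), 3, drop_remainder=False)))  # also keeps [6]
```

This is why the record's eval branch calls `d.repeat()`: evaluating for a fixed number of steps over a repeated dataset sidesteps both out-of-range errors and the dropped remainder.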
qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples","function_tokens":["def","read_squad_examples","(","input_file",",","is_training",")",":","with","tf",".","gfile",".","Open","(","input_file",",","\"r\"",")","as","reader",":","input_data","=","json",".","load","(","reader",")","[","\"data\"","]","def","is_whitespace","(","c",")",":","if","c","==","\" \"","or","c","==","\"\\t\"","or","c","==","\"\\r\"","or","c","==","\"\\n\"","or","ord","(","c",")","==","0x202F",":","return","True","return","False","examples","=","[","]","for","entry","in","input_data",":","for","paragraph","in","entry","[","\"paragraphs\"","]",":","paragraph_text","=","paragraph","[","\"context\"","]","doc_tokens","=","[","]","char_to_word_offset","=","[","]","prev_is_whitespace","=","True","for","c","in","paragraph_text",":","if","is_whitespace","(","c",")",":","prev_is_whitespace","=","True","else",":","if","prev_is_whitespace",":","doc_tokens",".","append","(","c",")","else",":","doc_tokens","[","-","1","]","+=","c","prev_is_whitespace","=","False","char_to_word_offset",".","append","(","len","(","doc_tokens",")","-","1",")","for","qa","in","paragraph","[","\"qas\"","]",":","qas_id","=","qa","[","\"id\"","]","question_text","=","qa","[","\"question\"","]","start_position","=","None","end_position","=","None","orig_answer_text","=","None","is_impossible","=","False","if","is_training",":","if","FLAGS",".","version_2_with_negative",":","is_impossible","=","qa","[","\"is_impossible\"","]","if","(","len","(","qa","[","\"answers\"","]",")","!=","1",")","and","(","not","is_impossible",")",":","raise","ValueError","(","\"For training, each question should have exactly 1 
answer.\"",")","if","not","is_impossible",":","answer","=","qa","[","\"answers\"","]","[","0","]","orig_answer_text","=","answer","[","\"text\"","]","answer_offset","=","answer","[","\"answer_start\"","]","answer_length","=","len","(","orig_answer_text",")","start_position","=","char_to_word_offset","[","answer_offset","]","end_position","=","char_to_word_offset","[","answer_offset","+","answer_length","-","1","]","# Only add answers where the text can be exactly recovered from the","# document. If this CAN'T happen it's likely due to weird Unicode","# stuff so we will just skip the example.","#","# Note that this means for training mode, every example is NOT","# guaranteed to be preserved.","actual_text","=","\" \"",".","join","(","doc_tokens","[","start_position",":","(","end_position","+","1",")","]",")","cleaned_answer_text","=","\" \"",".","join","(","tokenization",".","whitespace_tokenize","(","orig_answer_text",")",")","if","actual_text",".","find","(","cleaned_answer_text",")","==","-","1",":","tf",".","logging",".","warning","(","\"Could not find answer: '%s' vs. '%s'\"",",","actual_text",",","cleaned_answer_text",")","continue","else",":","start_position","=","-","1","end_position","=","-","1","orig_answer_text","=","\"\"","example","=","SquadExample","(","qas_id","=","qas_id",",","question_text","=","question_text",",","doc_tokens","=","doc_tokens",",","orig_answer_text","=","orig_answer_text",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","is_impossible",")","examples",".","append","(","example",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L227-L306"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn)","argument_list":"","return_statement":"","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, 
tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 
1","function_tokens":["def","convert_examples_to_features","(","examples",",","tokenizer",",","max_seq_length",",","doc_stride",",","max_query_length",",","is_training",",","output_fn",")",":","unique_id","=","1000000000","for","(","example_index",",","example",")","in","enumerate","(","examples",")",":","query_tokens","=","tokenizer",".","tokenize","(","example",".","question_text",")","if","len","(","query_tokens",")",">","max_query_length",":","query_tokens","=","query_tokens","[","0",":","max_query_length","]","tok_to_orig_index","=","[","]","orig_to_tok_index","=","[","]","all_doc_tokens","=","[","]","for","(","i",",","token",")","in","enumerate","(","example",".","doc_tokens",")",":","orig_to_tok_index",".","append","(","len","(","all_doc_tokens",")",")","sub_tokens","=","tokenizer",".","tokenize","(","token",")","for","sub_token","in","sub_tokens",":","tok_to_orig_index",".","append","(","i",")","all_doc_tokens",".","append","(","sub_token",")","tok_start_position","=","None","tok_end_position","=","None","if","is_training","and","example",".","is_impossible",":","tok_start_position","=","-","1","tok_end_position","=","-","1","if","is_training","and","not","example",".","is_impossible",":","tok_start_position","=","orig_to_tok_index","[","example",".","start_position","]","if","example",".","end_position","<","len","(","example",".","doc_tokens",")","-","1",":","tok_end_position","=","orig_to_tok_index","[","example",".","end_position","+","1","]","-","1","else",":","tok_end_position","=","len","(","all_doc_tokens",")","-","1","(","tok_start_position",",","tok_end_position",")","=","_improve_answer_span","(","all_doc_tokens",",","tok_start_position",",","tok_end_position",",","tokenizer",",","example",".","orig_answer_text",")","# The -3 accounts for [CLS], [SEP] and [SEP]","max_tokens_for_doc","=","max_seq_length","-","len","(","query_tokens",")","-","3","# We can have documents that are longer than the maximum sequence length.","# To deal with this we do a sliding window approach, where we take chunks","# of the up to our max length with a stride of `doc_stride`.","_DocSpan","=","collections",".","namedtuple","(","# pylint: 
disable=invalid-name","\"DocSpan\"",",","[","\"start\"",",","\"length\"","]",")","doc_spans","=","[","]","start_offset","=","0","while","start_offset","<","len","(","all_doc_tokens",")",":","length","=","len","(","all_doc_tokens",")","-","start_offset","if","length",">","max_tokens_for_doc",":","length","=","max_tokens_for_doc","doc_spans",".","append","(","_DocSpan","(","start","=","start_offset",",","length","=","length",")",")","if","start_offset","+","length","==","len","(","all_doc_tokens",")",":","break","start_offset","+=","min","(","length",",","doc_stride",")","for","(","doc_span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","tokens","=","[","]","token_to_orig_map","=","{","}","token_is_max_context","=","{","}","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","query_tokens",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","i","in","range","(","doc_span",".","length",")",":","split_token_index","=","doc_span",".","start","+","i","token_to_orig_map","[","len","(","tokens",")","]","=","tok_to_orig_index","[","split_token_index","]","is_max_context","=","_check_is_max_context","(","doc_spans",",","doc_span_index",",","split_token_index",")","token_is_max_context","[","len","(","tokens",")","]","=","is_max_context","tokens",".","append","(","all_doc_tokens","[","split_token_index","]",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","start_position","=","None","end_position","=","None","if","is_training","and","not","example",".","is_impossible",":","# For training, if our document chunk does not contain an annotation","# we throw it out, since there is nothing to predict.","doc_start","=","doc_span",".","start","doc_end","=","doc_span",".","start","+","doc_span",".","length","-","1","out_of_span","=","False","if","not","(","tok_start_position",">=","doc_start","and","tok_end_position","<=","doc_end",")",":","out_of_span","=","True","if","out_of_span",":","start_position","=","0","end_position","=","0","else",":","doc_offset","=","len","(","query_tokens",")","+","2","start_position","=","tok_start_position","-","doc_start","+","doc_offset","end_position","=","tok_end_position","-","doc_start","+","doc_offset","if","is_training","and","example",".","is_impossible",":","start_position","=","0","end_position","=","0","if","example_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","unique_id",")",")","tf",".","logging",".","info","(","\"example_index: %s\"","%","(","example_index",")",")","tf",".","logging",".","info","(","\"doc_span_index: %s\"","%","(","doc_span_index",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"token_to_orig_map: %s\"","%","\" \"",".","join","(","[","\"%d:%d\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_to_orig_map",")","]",")",")","tf",".","logging",".","info","(","\"token_is_max_context: %s\"","%","\" \"",".","join","(","[","\"%d:%s\"","%","(","x",",","y",")","for","(","x",",","y",")","in","six",".","iteritems","(","token_is_max_context",")","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","if","is_training","and","example",".","is_impossible",":","tf",".","logging",".","info","(","\"impossible example\"",")","if","is_training","and","not","example",".","is_impossible",":","answer_text","=","\" \"",".","join","(","tokens","[","start_position",":","(","end_position","+","1",")","]",")","tf",".","logging",".","info","(","\"start_position: %d\"","%","(","start_position",")",")","tf",".","logging",".","info","(","\"end_position: %d\"","%","(","end_position",")",")","tf",".","logging",".","info","(","\"answer: 
%s\"","%","(","tokenization",".","printable_text","(","answer_text",")",")",")","feature","=","InputFeatures","(","unique_id","=","unique_id",",","example_index","=","example_index",",","doc_span_index","=","doc_span_index",",","tokens","=","tokens",",","token_to_orig_map","=","token_to_orig_map",",","token_is_max_context","=","token_is_max_context",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","start_position","=","start_position",",","end_position","=","end_position",",","is_impossible","=","example",".","is_impossible",")","# Run callback","output_fn","(","feature",")","unique_id","+=","1"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L309-L473"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"_improve_answer_span","parameters":"(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text)","argument_list":"","return_statement":"return (input_start, input_end)","docstring":"Returns tokenized answer spans that better match the annotated answer.","docstring_summary":"Returns tokenized answer spans that better match the annotated answer.","docstring_tokens":["Returns","tokenized","answer","spans","that","better","match","the","annotated","answer","."],"function":"def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)","function_tokens":["def","_improve_answer_span","(","doc_tokens",",","input_start",",","input_end",",","tokenizer",",","orig_answer_text",")",":","# The SQuAD annotations are character based. We first project them to","# whitespace-tokenized words. But then after WordPiece tokenization, we can","# often find a \"better match\". For example:","#","# Question: What year was John Smith born?","# Context: The leader was John Smith (1895-1943).","# Answer: 1895","#","# The original whitespace-tokenized answer will be \"(1895-1943).\". 
However","# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match","# the exact answer, 1895.","#","# However, this is not always possible. Consider the following:","#","# Question: What country is the top exporter of electornics?","# Context: The Japanese electronics industry is the lagest in the world.","# Answer: Japan","#","# In this case, the annotator chose \"Japan\" as a character sub-span of","# the word \"Japanese\". Since our WordPiece tokenizer does not split","# \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare","# in SQuAD, but does happen.","tok_answer_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_answer_text",")",")","for","new_start","in","range","(","input_start",",","input_end","+","1",")",":","for","new_end","in","range","(","input_end",",","new_start","-","1",",","-","1",")",":","text_span","=","\" \"",".","join","(","doc_tokens","[","new_start",":","(","new_end","+","1",")","]",")","if","text_span","==","tok_answer_text",":","return","(","new_start",",","new_end",")","return","(","input_start",",","input_end",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L476-L510"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. 
E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L513-L547"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return (start_logits, end_logits)","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls\/squad\/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls\/squad\/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, 
end_logits)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","final_hidden","=","model",".","get_sequence_output","(",")","final_hidden_shape","=","modeling",".","get_shape_list","(","final_hidden",",","expected_rank","=","3",")","batch_size","=","final_hidden_shape","[","0","]","seq_length","=","final_hidden_shape","[","1","]","hidden_size","=","final_hidden_shape","[","2","]","output_weights","=","tf",".","get_variable","(","\"cls\/squad\/output_weights\"",",","[","2",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"cls\/squad\/output_bias\"",",","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","final_hidden_matrix","=","tf",".","reshape","(","final_hidden",",","[","batch_size","*","seq_length",",","hidden_size","]",")","logits","=","tf",".","matmul","(","final_hidden_matrix",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","logits","=","tf",".","reshape","(","logits",",","[","batch_size",",","seq_length",",","2","]",")","logits","=","tf",".","transpose","(","logits",",","[","2",",","0",",","1","]",")","unstacked_logits","=","tf",".","unstack","(","logits",",","axis","=","0",")","(","start_logits",",","end_logits",")","=","(","unstacked_logits","[","0","]",",","unstacked_logits","[","1","]",")","return","(","start_logits",",","end_logits",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L550-L587"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = 
tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) \/ 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","start_logits",",","end_logits",")","=","create_model","(","bert_config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","seq_length","=","modeling",".","get_shape_list","(","input_ids",")","[","1","]","def","compute_loss","(","logits",",","positions",")",":","one_hot_positions","=","tf",".","one_hot","(","positions",",","depth","=","seq_length",",","dtype","=","tf",".","float32",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","loss","=","-","tf",".","reduce_mean","(","tf",".","reduce_sum","(","one_hot_positions","*","log_probs",",","axis","=","-","1",")",")","return","loss","start_positions","=","features","[","\"start_positions\"","]","end_positions","=","features","[","\"end_positions\"","]","start_loss","=","compute_loss","(","start_logits",",","start_positions",")","end_loss","=","compute_loss","(","end_logits",",","end_positions",")","total_loss","=","(","start_loss","+","end_loss",")","\/","2.0","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","predictions","=","{","\"unique_ids\"",":","unique_ids",",","\"start_logits\"",":","start_logits",",","\"end_logits\"",":","end_logits",",","}","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and PREDICT modes are supported: 
%s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L590-L684"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"input_fn_builder","parameters":"(input_file, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"unique_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","}","if","is_training",":","name_to_features","[","\"start_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","name_to_features","[","\"end_positions\"","]","=","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to 
int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L687-L734"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"write_predictions","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min null score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, 
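The input_fn_builder record above parses fixed-length int64 features and downcasts them to int32 for the TPU. Below is a hedged sketch of the same parse-and-cast step, written against the TensorFlow 2.x tf.io names rather than the tf.contrib-era API the record uses; the feature values are made up.

# Sketch of _decode_record's schema and int64 -> int32 cast, TF 2.x style.
import tensorflow as tf

seq_length = 4
name_to_features = {
    "unique_ids": tf.io.FixedLenFeature([], tf.int64),
    "input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
}

# Build one serialized tf.train.Example to stand in for a TFRecord row.
example = tf.train.Example(features=tf.train.Features(feature={
    "unique_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
    "input_ids": tf.train.Feature(
        int64_list=tf.train.Int64List(value=[101, 2009, 2003, 102])),
}))
record = example.SerializeToString()

parsed = tf.io.parse_single_example(record, name_to_features)
# tf.Example only stores int64; cast down to int32 as the input_fn does.
parsed = {name: tf.cast(t, tf.int32) if t.dtype == tf.int64 else t
          for name, t in parsed.items()}
print(parsed["input_ids"])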
n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't include the empty option in the n-best, include it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")","function_tokens":["def","write_predictions","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",",","output_null_log_odds_file",")",":","tf",".","logging",".","info","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","tf",".","logging",".","info","(","\"Writing nbest to: %s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature",".","example_index","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","all_examples",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","score_null","=","1000000","# large and positive","min_null_feature_index","=","0","# the paragraph slice with min null score","null_start_logit","=","0","# the start logit at the slice with min null score","null_end_logit","=","0","# the end logit at the slice with min null 
score","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature",".","unique_id","]","start_indexes","=","_get_best_indexes","(","result",".","start_logits",",","n_best_size",")","end_indexes","=","_get_best_indexes","(","result",".","end_logits",",","n_best_size",")","# if we could have irrelevant answers, get the min score of irrelevant","if","FLAGS",".","version_2_with_negative",":","feature_null_score","=","result",".","start_logits","[","0","]","+","result",".","end_logits","[","0","]","if","feature_null_score","<","score_null",":","score_null","=","feature_null_score","min_null_feature_index","=","feature_index","null_start_logit","=","result",".","start_logits","[","0","]","null_end_logit","=","result",".","end_logits","[","0","]","for","start_index","in","start_indexes",":","for","end_index","in","end_indexes",":","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature",".","tokens",")",":","continue","if","end_index",">=","len","(","feature",".","tokens",")",":","continue","if","start_index","not","in","feature",".","token_to_orig_map",":","continue","if","end_index","not","in","feature",".","token_to_orig_map",":","continue","if","not","feature",".","token_is_max_context",".","get","(","start_index",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","result",".","start_logits","[","start_index","]",",","end_logit","=","result",".","end_logits","[","end_index","]",")",")","if","FLAGS",".","version_2_with_negative",":","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","min_null_feature_index",",","start_index","=","0",",","end_index","=","0",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"","]",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature",".","tokens","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature",".","token_to_orig_map","[","pred",".","start_index","]","orig_doc_end","=","feature",".","token_to_orig_map","[","pred",".","end_index","]","orig_tokens","=","example",".","doc_tokens","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\" \"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean 
whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\" \"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",")",")","# if we didn't include the empty option in the n-best, include it","if","FLAGS",".","version_2_with_negative",":","if","\"\"","not","in","seen_predictions",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","# In very rare edge cases we could have no valid predictions. So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","assert","len","(","nbest",")",">=","1","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","if","not","best_non_null_entry",":","if","entry",".","text",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","probs","[","i","]","output","[","\"start_logit\"","]","=","entry",".","start_logit","output","[","\"end_logit\"","]","=","entry",".","end_logit","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","if","not","FLAGS",".","version_2_with_negative",":","all_predictions","[","example",".","qas_id","]","=","nbest_json","[","0","]","[","\"text\"","]","else",":","# predict \"\" iff the null score - the score of best non-null > 
threshold","score_diff","=","score_null","-","best_non_null_entry",".","start_logit","-","(","best_non_null_entry",".","end_logit",")","scores_diff_json","[","example",".","qas_id","]","=","score_diff","if","score_diff",">","FLAGS",".","null_score_diff_threshold",":","all_predictions","[","example",".","qas_id","]","=","\"\"","else",":","all_predictions","[","example",".","qas_id","]","=","best_non_null_entry",".","text","all_nbest_json","[","example",".","qas_id","]","=","nbest_json","with","tf",".","gfile",".","GFile","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",")","+","\"\\n\"",")","with","tf",".","gfile",".","GFile","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",")","+","\"\\n\"",")","if","FLAGS",".","version_2_with_negative",":","with","tf",".","gfile",".","GFile","(","output_null_log_odds_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","scores_diff_json",",","indent","=","4",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L741-L924"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"get_final_text","parameters":"(pred_text, orig_text, do_lower_case)","argument_list":"","return_statement":"return output_text","docstring":"Project the tokenized prediction back to the original text.","docstring_summary":"Project the tokenized prediction back to the original text.","docstring_tokens":["Project","the","tokenized","prediction","back","to","the","original","text","."],"function":"def get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping\/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heuristic between\n # `pred_text` and `orig_text` to get a character-to-character alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
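write_predictions above prunes candidate answer spans before ranking them: only the n best start and end logits are considered, and pairs that run backwards or exceed max_answer_length are dropped. A pure-Python sketch of that pruning, omitting the token_to_orig_map and token_is_max_context bookkeeping:

# Sketch of the prelim-prediction pruning in write_predictions above.
def best_spans(start_logits, end_logits, n_best_size, max_answer_length):
    def top_indices(logits):
        order = sorted(range(len(logits)), key=lambda i: logits[i], reverse=True)
        return order[:n_best_size]

    candidates = []
    for s in top_indices(start_logits):
        for e in top_indices(end_logits):
            if e < s or e - s + 1 > max_answer_length:
                continue  # end before start, or span too long
            candidates.append((s, e, start_logits[s] + end_logits[e]))
    # Rank surviving spans by summed logit, as the record does.
    return sorted(candidates, key=lambda c: c[2], reverse=True)

start = [0.1, 2.5, 0.3, 0.2]
end = [0.0, 0.4, 3.1, 0.2]
print(best_spans(start, end, n_best_size=2, max_answer_length=3))
# -> [(1, 2, 5.6), ...]: the span from token 1 to token 2 ranks first.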
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text","function_tokens":["def","get_final_text","(","pred_text",",","orig_text",",","do_lower_case",")",":","# When we created the data, we kept track of the alignment between original","# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So","# now `orig_text` contains the span of our original text corresponding to the","# span that we predicted.","#","# However, `orig_text` may contain extra characters that we don't want in","# our prediction.","#","# For example, let's say:","# pred_text = steve smith","# orig_text = Steve Smith's","#","# We don't want to return `orig_text` because it contains the extra \"'s\".","#","# We don't want to return `pred_text` because it's already been normalized","# (the SQuAD eval script also does punctuation stripping\/lower casing but","# our tokenizer does additional normalization like stripping accent","# characters).","#","# What we really want to return is \"Steve Smith\".","#","# Therefore, we have to apply a semi-complicated alignment heuristic between","# `pred_text` and `orig_text` to get a character-to-character alignment. This","# can fail in certain cases in which case we just return `orig_text`.","def","_strip_spaces","(","text",")",":","ns_chars","=","[","]","ns_to_s_map","=","collections",".","OrderedDict","(",")","for","(","i",",","c",")","in","enumerate","(","text",")",":","if","c","==","\" \"",":","continue","ns_to_s_map","[","len","(","ns_chars",")","]","=","i","ns_chars",".","append","(","c",")","ns_text","=","\"\"",".","join","(","ns_chars",")","return","(","ns_text",",","ns_to_s_map",")","# We first tokenize `orig_text`, strip whitespace from the result","# and `pred_text`, and check if they are the same length. 
If they are","# NOT the same length, the heuristic has failed. If they are the same","# length, we assume the characters are one-to-one aligned.","tokenizer","=","tokenization",".","BasicTokenizer","(","do_lower_case","=","do_lower_case",")","tok_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_text",")",")","start_position","=","tok_text",".","find","(","pred_text",")","if","start_position","==","-","1",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Unable to find text: '%s' in '%s'\"","%","(","pred_text",",","orig_text",")",")","return","orig_text","end_position","=","start_position","+","len","(","pred_text",")","-","1","(","orig_ns_text",",","orig_ns_to_s_map",")","=","_strip_spaces","(","orig_text",")","(","tok_ns_text",",","tok_ns_to_s_map",")","=","_strip_spaces","(","tok_text",")","if","len","(","orig_ns_text",")","!=","len","(","tok_ns_text",")",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Length not equal after stripping spaces: '%s' vs '%s'\"",",","orig_ns_text",",","tok_ns_text",")","return","orig_text","# We then project the characters in `pred_text` back to `orig_text` using","# the character-to-character alignment.","tok_s_to_ns_map","=","{","}","for","(","i",",","tok_index",")","in","six",".","iteritems","(","tok_ns_to_s_map",")",":","tok_s_to_ns_map","[","tok_index","]","=","i","orig_start_position","=","None","if","start_position","in","tok_s_to_ns_map",":","ns_start_position","=","tok_s_to_ns_map","[","start_position","]","if","ns_start_position","in","orig_ns_to_s_map",":","orig_start_position","=","orig_ns_to_s_map","[","ns_start_position","]","if","orig_start_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map start position\"",")","return","orig_text","orig_end_position","=","None","if","end_position","in","tok_s_to_ns_map",":","ns_end_position","=","tok_s_to_ns_map","[","end_position","]","if","ns_end_position","in","orig_ns_to_s_map",":","orig_end_position","=","orig_ns_to_s_map","[","ns_end_position","]","if","orig_end_position","is","None",":","if","FLAGS",".","verbose_logging",":","tf",".","logging",".","info","(","\"Couldn't map end position\"",")","return","orig_text","output_text","=","orig_text","[","orig_start_position",":","(","orig_end_position","+","1",")","]","return","output_text"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L927-L1020"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return 
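The core trick in get_final_text above is _strip_spaces, which records, for every character of the space-stripped string, its index in the original string, so a span located in tokenized text can be projected back onto the raw text. A standalone illustration:

# Sketch of the _strip_spaces alignment map used by get_final_text above.
import collections

def strip_spaces(text):
    ns_chars = []
    ns_to_s_map = collections.OrderedDict()
    for i, c in enumerate(text):
        if c == " ":
            continue
        ns_to_s_map[len(ns_chars)] = i  # stripped index -> original index
        ns_chars.append(c)
    return "".join(ns_chars), ns_to_s_map

orig = "Steve Smith's"
ns_text, ns_map = strip_spaces(orig)
print(ns_text)                      # SteveSmith's
print(ns_map[5], orig[ns_map[5]])   # 6 S : stripped position 5 maps past the space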
best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L1023-L1032"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L1035-L1055"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"validate_flags_or_throw","parameters":"(bert_config)","argument_list":"","return_statement":"","docstring":"Validate the input FLAGS or throw an exception.","docstring_summary":"Validate the input FLAGS or throw an exception.","docstring_tokens":["Validate","the","input","FLAGS","or","throw","an","exception","."],"function":"def validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence 
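_compute_softmax above subtracts the maximum score before exponentiating, which keeps math.exp from overflowing on large logits. A quick standalone check of the same routine:

# The max-subtraction trick from _compute_softmax above: identical result on
# ordinary inputs, and no OverflowError where the naive formula would blow up.
import math

def compute_softmax(scores):
    if not scores:
        return []
    max_score = max(scores)
    exps = [math.exp(s - max_score) for s in scores]
    total = sum(exps)
    return [e / total for e in exps]

print(compute_softmax([1.0, 2.0, 3.0]))   # ordinary case
print(compute_softmax([1000.0, 1001.0]))  # naive math.exp(1000) would overflow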
length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))","function_tokens":["def","validate_flags_or_throw","(","bert_config",")",":","tokenization",".","validate_case_matches_checkpoint","(","FLAGS",".","do_lower_case",",","FLAGS",".","init_checkpoint",")","if","not","FLAGS",".","do_train","and","not","FLAGS",".","do_predict",":","raise","ValueError","(","\"At least one of `do_train` or `do_predict` must be True.\"",")","if","FLAGS",".","do_train",":","if","not","FLAGS",".","train_file",":","raise","ValueError","(","\"If `do_train` is True, then `train_file` must be specified.\"",")","if","FLAGS",".","do_predict",":","if","not","FLAGS",".","predict_file",":","raise","ValueError","(","\"If `do_predict` is True, then `predict_file` must be specified.\"",")","if","FLAGS",".","max_seq_length",">","bert_config",".","max_position_embeddings",":","raise","ValueError","(","\"Cannot use sequence length %d because the BERT model \"","\"was only trained up to sequence length %d\"","%","(","FLAGS",".","max_seq_length",",","bert_config",".","max_position_embeddings",")",")","if","FLAGS",".","max_seq_length","<=","FLAGS",".","max_query_length","+","3",":","raise","ValueError","(","\"The max_seq_length (%d) must be greater than max_query_length \"","\"(%d) + 3\"","%","(","FLAGS",".","max_seq_length",",","FLAGS",".","max_query_length",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L1097-L1123"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_squad.py","language":"python","identifier":"FeatureWriter.process_feature","parameters":"(self, feature)","argument_list":"","return_statement":"","docstring":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_summary":"Write a InputFeature to the TFRecordWriter as a tf.train.Example.","docstring_tokens":["Write","a","InputFeature","to","the","TFRecordWriter","as","a","tf",".","train",".","Example","."],"function":"def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n 
self._writer.write(tf_example.SerializeToString())","function_tokens":["def","process_feature","(","self",",","feature",")",":","self",".","num_features","+=","1","def","create_int_feature","(","values",")",":","feature","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","feature","features","=","collections",".","OrderedDict","(",")","features","[","\"unique_ids\"","]","=","create_int_feature","(","[","feature",".","unique_id","]",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","if","self",".","is_training",":","features","[","\"start_positions\"","]","=","create_int_feature","(","[","feature",".","start_position","]",")","features","[","\"end_positions\"","]","=","create_int_feature","(","[","feature",".","end_position","]",")","impossible","=","0","if","feature",".","is_impossible",":","impossible","=","1","features","[","\"is_impossible\"","]","=","create_int_feature","(","[","impossible","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","self",".","_writer",".","write","(","tf_example",".","SerializeToString","(",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_squad.py#L1067-L1091"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"validate_case_matches_checkpoint","parameters":"(do_lower_case, init_checkpoint)","argument_list":"","return_statement":"","docstring":"Checks whether the casing config is consistent with the checkpoint name.","docstring_summary":"Checks whether the casing config is consistent with the checkpoint name.","docstring_tokens":["Checks","whether","the","casing","config","is","consistent","with","the","checkpoint","name","."],"function":"def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. 
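FeatureWriter.process_feature above serializes each feature as an int64 list inside a tf.train.Example. Below is a hedged sketch of the same serialization using the TF 2.x tf.io.TFRecordWriter; the output path and feature values are illustrative only.

# Write-side counterpart to the parsing sketch earlier: one record, one file.
import collections
import tensorflow as tf

def create_int_feature(values):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([1000001])
features["input_ids"] = create_int_feature([101, 102])
features["input_mask"] = create_int_feature([1, 1])
features["segment_ids"] = create_int_feature([0, 0])

tf_example = tf.train.Example(features=tf.train.Features(feature=features))
with tf.io.TFRecordWriter("/tmp/train.tf_record") as writer:  # illustrative path
    writer.write(tf_example.SerializeToString())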
\"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-trained. If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))","function_tokens":["def","validate_case_matches_checkpoint","(","do_lower_case",",","init_checkpoint",")",":","# The casing has to be passed in by the user and there is no explicit check","# as to whether it matches the checkpoint. The casing information probably","# should have been stored in the bert_config.json file, but it's not, so","# we have to heuristically detect it to validate.","if","not","init_checkpoint",":","return","m","=","re",".","match","(","\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\"",",","init_checkpoint",")","if","m","is","None",":","return","model_name","=","m",".","group","(","1",")","lower_models","=","[","\"uncased_L-24_H-1024_A-16\"",",","\"uncased_L-12_H-768_A-12\"",",","\"multilingual_L-12_H-768_A-12\"",",","\"chinese_L-12_H-768_A-12\"","]","cased_models","=","[","\"cased_L-12_H-768_A-12\"",",","\"cased_L-24_H-1024_A-16\"",",","\"multi_cased_L-12_H-768_A-12\"","]","is_bad_config","=","False","if","model_name","in","lower_models","and","not","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"False\"","case_name","=","\"lowercased\"","opposite_flag","=","\"True\"","if","model_name","in","cased_models","and","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"True\"","case_name","=","\"cased\"","opposite_flag","=","\"False\"","if","is_bad_config",":","raise","ValueError","(","\"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"","\"However, `%s` seems to be a %s model, so you \"","\"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"","\"how the model was pre-trained. 
If this error is wrong, please \"","\"just comment out this check.\"","%","(","actual_flag",",","init_checkpoint",",","model_name",",","case_name",",","opposite_flag",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L28-L75"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L78-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise 
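validate_case_matches_checkpoint above infers casing purely from the checkpoint path. A quick demo of the same regex pulling out the model directory name; the sample paths are made up.

# The record's regex, applied to a few hypothetical checkpoint paths.
import re

for ckpt in ["uncased_L-12_H-768_A-12/bert_model.ckpt",
             "models/cased_L-12_H-768_A-12/bert_model.ckpt",
             "some_other_checkpoint.ckpt"]:
    m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", ckpt)
    print(ckpt, "->", m.group(1) if m else None)
# uncased_* directory names imply --do_lower_case=True; cased_* imply False.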
ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L98-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","index","=","0","with","tf",".","gfile",".","GFile","(","vocab_file",",","\"r\"",")","as","reader",":","while","True",":","token","=","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","token",":","break","token","=","token",".","strip","(",")","vocab","[","token","]","=","index","index","+=","1","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L121-L133"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"convert_by_vocab","parameters":"(vocab, items)","argument_list":"","return_statement":"return output","docstring":"Converts a sequence of [tokens|ids] using the vocab.","docstring_summary":"Converts a sequence of [tokens|ids] using the vocab.","docstring_tokens":["Converts","a","sequence","of","[","tokens|ids","]","using","the","vocab","."],"function":"def convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n output.append(vocab[item])\n return 
output","function_tokens":["def","convert_by_vocab","(","vocab",",","items",")",":","output","=","[","]","for","item","in","items",":","output",".","append","(","vocab","[","item","]",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L136-L141"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","piece","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L152-L158"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically control characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically control characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L362-L371"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we 
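load_vocab and convert_by_vocab above implement a token/id round trip keyed on line order. A minimal in-memory sketch that swaps tf.gfile for io.StringIO; the vocab contents are illustrative.

# Vocab round trip: line order assigns ids, convert_by_vocab maps both ways.
import collections
import io

vocab_data = "[PAD]\n[UNK]\n[CLS]\n[SEP]\nhello\nworld\n"

vocab = collections.OrderedDict()
with io.StringIO(vocab_data) as reader:
    for index, line in enumerate(reader):
        vocab[line.strip()] = index

inv_vocab = {v: k for k, v in vocab.items()}

def convert_by_vocab(table, items):
    return [table[item] for item in items]

ids = convert_by_vocab(vocab, ["[CLS]", "hello", "world", "[SEP]"])
print(ids)                               # [2, 4, 5, 3]
print(convert_by_vocab(inv_vocab, ids))  # back to the original tokens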
count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat","in","(","\"Cc\"",",","\"Cf\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L374-L383"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L386-L399"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = 
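The character helpers above (_is_whitespace, _is_control, _is_punctuation) lean on unicodedata categories: "Zs" for spaces, "Cc"/"Cf" for controls, and categories starting with "P" for punctuation, plus explicit ASCII ranges for symbols like "^" that Unicode does not class as punctuation. A quick demonstration:

# Inspect the unicodedata categories the helpers above branch on.
import unicodedata

for ch in [" ", "\t", "\u00a0", "\u200b", ",", "^", "a"]:
    print(repr(ch), unicodedata.category(ch))
# "\u00a0" (no-break space) is Zs, so _is_whitespace accepts it even though it
# is not an ASCII space; "^" is category Sk, which is why _is_punctuation adds
# the explicit ASCII ranges on top of the startswith("P") test.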
do_lower_case","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",")",":","self",".","do_lower_case","=","do_lower_case"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L188-L194"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text.","docstring_summary":"Tokenizes a piece of text.","docstring_tokens":["Tokenizes","a","piece","of","text","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. 
This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L196-L218"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L220-L229"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text)","argument_list":"","return_statement":"return [\"\".join(x) for x in output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in 
output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",")",":","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L231-L249"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L251-L262"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L264-L284"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L286-L297"}
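Taken together, the BasicTokenizer records above (tokenize, _run_strip_accents, _run_split_on_punc, _tokenize_chinese_chars, _is_chinese_char, _clean_text) describe one pipeline: clean the text, pad CJK characters with spaces, whitespace-split, optionally lowercase and strip accents, then break punctuation out into standalone tokens. The following is a minimal standalone sketch of that flow, illustrative rather than repository code; _is_punct is a simplified stand-in for the module's _is_punctuation helper, which is not included in these records.

import unicodedata

def _is_punct(char):
  # Simplified stand-in for tokenization._is_punctuation: ASCII symbol ranges
  # plus anything Unicode classifies as punctuation ("P*").
  cp = ord(char)
  if ((33 <= cp <= 47) or (58 <= cp <= 64) or
      (91 <= cp <= 96) or (123 <= cp <= 126)):
    return True
  return unicodedata.category(char).startswith("P")

def _is_cjk(cp):
  # Same codepoint ranges as BasicTokenizer._is_chinese_char above.
  return (0x4E00 <= cp <= 0x9FFF or 0x3400 <= cp <= 0x4DBF or
          0x20000 <= cp <= 0x2A6DF or 0x2A700 <= cp <= 0x2B73F or
          0x2B740 <= cp <= 0x2B81F or 0x2B820 <= cp <= 0x2CEAF or
          0xF900 <= cp <= 0xFAFF or 0x2F800 <= cp <= 0x2FA1F)

def basic_tokenize(text, do_lower_case=True):
  # Pad CJK characters with spaces (as in _tokenize_chinese_chars), then
  # whitespace-split; per token, lowercase and strip accents (NFD, drop
  # "Mn" combining marks, as in _run_strip_accents), then split punctuation
  # out into standalone tokens (as in _run_split_on_punc).
  text = "".join(" %s " % ch if _is_cjk(ord(ch)) else ch for ch in text)
  tokens = []
  for token in text.split():
    if do_lower_case:
      token = unicodedata.normalize("NFD", token.lower())
      token = "".join(ch for ch in token if unicodedata.category(ch) != "Mn")
    word = ""
    for ch in token:
      if _is_punct(ch):
        if word:
          tokens.append(word)
        word = ""
        tokens.append(ch)
      else:
        word += ch
    if word:
      tokens.append(word)
  return tokens

print(basic_tokenize(u"Héllo, 世界!"))  # ['hello', ',', '世', '界', '!']

The real BasicTokenizer keeps these steps as separate methods so that, for example, accent stripping only runs when do_lower_case is set, exactly as sketched above.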
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tokenization.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer`.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer`.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tokenization.py#L308-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tf_metrics.py","language":"python","identifier":"precision","parameters":"(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro')","argument_list":"","return_statement":"return (pr, op)","docstring":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n 
num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","precision","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def precision(labels, predictions, num_classes, pos_indices=None,\n weights=None, average='micro'):\n \"\"\"Multi-class precision metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is 
all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n pr, _, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n op, _, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (pr, op)","function_tokens":["def","precision","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","pr",",","_",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","op",",","_",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","pr",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tf_metrics.py#L15-L50"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tf_metrics.py","language":"python","identifier":"recall","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro')","argument_list":"","return_statement":"return (re, op)","docstring":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","recall","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def recall(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro'):\n \"\"\"Multi-class recall metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, re, _ = metrics_from_confusion_matrix(\n cm, pos_indices, average=average)\n _, op, _ = metrics_from_confusion_matrix(\n op, pos_indices, average=average)\n return (re, op)","function_tokens":["def","recall","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","re",",","_","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",")","_",",","op",",","_","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",")","return","(","re",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tf_metrics.py#L53-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tf_metrics.py","language":"python","identifier":"fbeta","parameters":"(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1)","argument_list":"","return_statement":"return (fbeta, op)","docstring":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_summary":"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)","docstring_tokens":["Multi","-","class","fbeta","metric","for","Tensorflow","Parameters","----------","labels",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","true","labels","predictions",":","Tensor","of","tf",".","int32","or","tf",".","int64","The","predictions","same","shape","as","labels","num_classes",":","int","The","number","of","classes","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","default","is","all","weights",":","Tensor","of","tf",".","int32","optional","Mask","must","be","of","compatible","shape","with","labels","average",":","str","optional","micro",":","counts","the","total","number","of","true","positives","false","positives","and","false","negatives","for","the","classes","in","pos_indices","and","infer","the","metric","from","it",".","macro",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","average",".","Will","not","account","for","class","imbalance",".","weighted",":","will","compute","the","metric","separately","for","each","class","in","pos_indices","and","perform","a","weighted","average","by","the","total","number","of","true","labels","for","each","class",".","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","Returns","-------","tuple","of","(","scalar","float","Tensor","update_op",")"],"function":"def fbeta(labels, predictions, num_classes, pos_indices=None, weights=None,\n average='micro', beta=1):\n \"\"\"Multi-class fbeta metric for Tensorflow\n Parameters\n ----------\n labels : Tensor of tf.int32 or tf.int64\n The true labels\n predictions : Tensor of tf.int32 or tf.int64\n The predictions, same shape as labels\n num_classes : int\n The number of classes\n pos_indices : list of int, optional\n The indices of the positive classes, default is all\n weights : Tensor of tf.int32, optional\n Mask, must be of compatible shape with labels\n average : str, optional\n 'micro': counts the total number of true positives, false\n positives, and false negatives for the classes in\n `pos_indices` and infer the metric from it.\n 'macro': will compute the metric separately for each class in\n `pos_indices` and average. 
Will not account for class\n imbalance.\n 'weighted': will compute the metric separately for each class in\n `pos_indices` and perform a weighted average by the total\n number of true labels for each class.\n beta : int, optional\n Weight of precision in harmonic mean\n Returns\n -------\n tuple of (scalar float Tensor, update_op)\n \"\"\"\n cm, op = _streaming_confusion_matrix(\n labels, predictions, num_classes, weights)\n _, _, fbeta = metrics_from_confusion_matrix(\n cm, pos_indices, average=average, beta=beta)\n _, _, op = metrics_from_confusion_matrix(\n op, pos_indices, average=average, beta=beta)\n return (fbeta, op)","function_tokens":["def","fbeta","(","labels",",","predictions",",","num_classes",",","pos_indices","=","None",",","weights","=","None",",","average","=","'micro'",",","beta","=","1",")",":","cm",",","op","=","_streaming_confusion_matrix","(","labels",",","predictions",",","num_classes",",","weights",")","_",",","_",",","fbeta","=","metrics_from_confusion_matrix","(","cm",",","pos_indices",",","average","=","average",",","beta","=","beta",")","_",",","_",",","op","=","metrics_from_confusion_matrix","(","op",",","pos_indices",",","average","=","average",",","beta","=","beta",")","return","(","fbeta",",","op",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tf_metrics.py#L97-L134"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tf_metrics.py","language":"python","identifier":"safe_div","parameters":"(numerator, denominator)","argument_list":"","return_statement":"return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","docstring":"Safe division, return 0 if denominator is 0","docstring_summary":"Safe division, return 0 if denominator is 0","docstring_tokens":["Safe","division","return","0","if","denominator","is","0"],"function":"def safe_div(numerator, denominator):\n \"\"\"Safe division, return 0 if denominator is 0\"\"\"\n numerator, denominator = tf.to_float(numerator), tf.to_float(denominator)\n zeros = tf.zeros_like(numerator, dtype=numerator.dtype)\n denominator_is_zero = tf.equal(denominator, zeros)\n return tf.where(denominator_is_zero, zeros, numerator \/ denominator)","function_tokens":["def","safe_div","(","numerator",",","denominator",")",":","numerator",",","denominator","=","tf",".","to_float","(","numerator",")",",","tf",".","to_float","(","denominator",")","zeros","=","tf",".","zeros_like","(","numerator",",","dtype","=","numerator",".","dtype",")","denominator_is_zero","=","tf",".","equal","(","denominator",",","zeros",")","return","tf",".","where","(","denominator_is_zero",",","zeros",",","numerator","\/","denominator",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tf_metrics.py#L137-L142"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tf_metrics.py","language":"python","identifier":"pr_re_fbeta","parameters":"(cm, pos_indices, beta=1)","argument_list":"","return_statement":"return pr, re, fbeta","docstring":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_summary":"Uses a confusion matrix to compute precision, recall and fbeta","docstring_tokens":["Uses","a","confusion","matrix","to","compute","precision","recall","and","fbeta"],"function":"def pr_re_fbeta(cm, pos_indices, beta=1):\n \"\"\"Uses a confusion matrix to compute 
precision, recall and fbeta\"\"\"\n num_classes = cm.shape[0]\n neg_indices = [i for i in range(num_classes) if i not in pos_indices]\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, neg_indices] = 0\n diag_sum = tf.reduce_sum(tf.diag_part(cm * cm_mask))\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[:, neg_indices] = 0\n tot_pred = tf.reduce_sum(cm * cm_mask)\n\n cm_mask = np.ones([num_classes, num_classes])\n cm_mask[neg_indices, :] = 0\n tot_gold = tf.reduce_sum(cm * cm_mask)\n\n pr = safe_div(diag_sum, tot_pred)\n re = safe_div(diag_sum, tot_gold)\n fbeta = safe_div((1. + beta**2) * pr * re, beta**2 * pr + re)\n\n return pr, re, fbeta","function_tokens":["def","pr_re_fbeta","(","cm",",","pos_indices",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","neg_indices","=","[","i","for","i","in","range","(","num_classes",")","if","i","not","in","pos_indices","]","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",","neg_indices","]","=","0","diag_sum","=","tf",".","reduce_sum","(","tf",".","diag_part","(","cm","*","cm_mask",")",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[",":",",","neg_indices","]","=","0","tot_pred","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","cm_mask","=","np",".","ones","(","[","num_classes",",","num_classes","]",")","cm_mask","[","neg_indices",",",":","]","=","0","tot_gold","=","tf",".","reduce_sum","(","cm","*","cm_mask",")","pr","=","safe_div","(","diag_sum",",","tot_pred",")","re","=","safe_div","(","diag_sum",",","tot_gold",")","fbeta","=","safe_div","(","(","1.","+","beta","**","2",")","*","pr","*","re",",","beta","**","2","*","pr","+","re",")","return","pr",",","re",",","fbeta"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tf_metrics.py#L145-L165"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/tf_metrics.py","language":"python","identifier":"metrics_from_confusion_matrix","parameters":"(cm, pos_indices=None, average='micro',\n beta=1)","argument_list":"","return_statement":"","docstring":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'","docstring_summary":"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 
'weighted'","docstring_tokens":["Precision","Recall","and","F1","from","the","confusion","matrix","Parameters","----------","cm",":","tf",".","Tensor","of","type","tf",".","int32","of","shape","(","num_classes","num_classes",")","The","streaming","confusion","matrix",".","pos_indices",":","list","of","int","optional","The","indices","of","the","positive","classes","beta",":","int","optional","Weight","of","precision","in","harmonic","mean","average",":","str","optional","micro","macro","or","weighted"],"function":"def metrics_from_confusion_matrix(cm, pos_indices=None, average='micro',\n beta=1):\n \"\"\"Precision, Recall and F1 from the confusion matrix\n Parameters\n ----------\n cm : tf.Tensor of type tf.int32, of shape (num_classes, num_classes)\n The streaming confusion matrix.\n pos_indices : list of int, optional\n The indices of the positive classes\n beta : int, optional\n Weight of precision in harmonic mean\n average : str, optional\n 'micro', 'macro' or 'weighted'\n \"\"\"\n num_classes = cm.shape[0]\n if pos_indices is None:\n pos_indices = [i for i in range(num_classes)]\n\n if average == 'micro':\n return pr_re_fbeta(cm, pos_indices, beta)\n elif average in {'macro', 'weighted'}:\n precisions, recalls, fbetas, n_golds = [], [], [], []\n for idx in pos_indices:\n pr, re, fbeta = pr_re_fbeta(cm, [idx], beta)\n precisions.append(pr)\n recalls.append(re)\n fbetas.append(fbeta)\n cm_mask = np.zeros([num_classes, num_classes])\n cm_mask[idx, :] = 1\n n_golds.append(tf.to_float(tf.reduce_sum(cm * cm_mask)))\n\n if average == 'macro':\n pr = tf.reduce_mean(precisions)\n re = tf.reduce_mean(recalls)\n fbeta = tf.reduce_mean(fbetas)\n return pr, re, fbeta\n if average == 'weighted':\n n_gold = tf.reduce_sum(n_golds)\n pr_sum = sum(p * n for p, n in zip(precisions, n_golds))\n pr = safe_div(pr_sum, n_gold)\n re_sum = sum(r * n for r, n in zip(recalls, n_golds))\n re = safe_div(re_sum, n_gold)\n fbeta_sum = sum(f * n for f, n in zip(fbetas, n_golds))\n fbeta = safe_div(fbeta_sum, n_gold)\n return pr, re, fbeta\n\n else:\n raise 
NotImplementedError()","function_tokens":["def","metrics_from_confusion_matrix","(","cm",",","pos_indices","=","None",",","average","=","'micro'",",","beta","=","1",")",":","num_classes","=","cm",".","shape","[","0","]","if","pos_indices","is","None",":","pos_indices","=","[","i","for","i","in","range","(","num_classes",")","]","if","average","==","'micro'",":","return","pr_re_fbeta","(","cm",",","pos_indices",",","beta",")","elif","average","in","{","'macro'",",","'weighted'","}",":","precisions",",","recalls",",","fbetas",",","n_golds","=","[","]",",","[","]",",","[","]",",","[","]","for","idx","in","pos_indices",":","pr",",","re",",","fbeta","=","pr_re_fbeta","(","cm",",","[","idx","]",",","beta",")","precisions",".","append","(","pr",")","recalls",".","append","(","re",")","fbetas",".","append","(","fbeta",")","cm_mask","=","np",".","zeros","(","[","num_classes",",","num_classes","]",")","cm_mask","[","idx",",",":","]","=","1","n_golds",".","append","(","tf",".","to_float","(","tf",".","reduce_sum","(","cm","*","cm_mask",")",")",")","if","average","==","'macro'",":","pr","=","tf",".","reduce_mean","(","precisions",")","re","=","tf",".","reduce_mean","(","recalls",")","fbeta","=","tf",".","reduce_mean","(","fbetas",")","return","pr",",","re",",","fbeta","if","average","==","'weighted'",":","n_gold","=","tf",".","reduce_sum","(","n_golds",")","pr_sum","=","sum","(","p","*","n","for","p",",","n","in","zip","(","precisions",",","n_golds",")",")","pr","=","safe_div","(","pr_sum",",","n_gold",")","re_sum","=","sum","(","r","*","n","for","r",",","n","in","zip","(","recalls",",","n_golds",")",")","re","=","safe_div","(","re_sum",",","n_gold",")","fbeta_sum","=","sum","(","f","*","n","for","f",",","n","in","zip","(","fbetas",",","n_golds",")",")","fbeta","=","safe_div","(","fbeta_sum",",","n_gold",")","return","pr",",","re",",","fbeta","else",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/tf_metrics.py#L168-L215"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * cdf","docstring":"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.","docstring_summary":"Gaussian Error Linear Unit.","docstring_tokens":["Gaussian","Error","Linear","Unit","."],"function":"def gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 \/ np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","tf",".","tanh","(","(","np",".","sqrt","(","2","\/","np",".","pi",")","*","(","x","+","0.044715","*","tf",".","pow","(","x",",","3",")",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L264-L277"} 
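The gelu record above gives only the tanh-based approximation of the activation. A small NumPy check, an illustrative sketch rather than repository code, makes the relationship to the exact Gaussian-CDF definition x * Phi(x) concrete:

import math

import numpy as np

def gelu_tanh(x):
  # Same formula as the recorded modeling.gelu, with tf ops swapped for numpy.
  cdf = 0.5 * (1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
  return x * cdf

def gelu_exact(x):
  # Exact definition: x times the standard normal CDF, written via erf.
  return np.array([v * 0.5 * (1.0 + math.erf(v / math.sqrt(2.0))) for v in x])

x = np.linspace(-4.0, 4.0, 81)
print(np.max(np.abs(gelu_tanh(x) - gelu_exact(x))))  # stays small over the range

The tanh form is used because it was historically cheaper than erf on accelerators; get_activation in the next record simply maps the string "gelu" to this function.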
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"get_activation","parameters":"(activation_string)","argument_list":"","return_statement":"","docstring":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.","docstring_summary":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.","docstring_tokens":["Maps","a","string","to","a","Python","function","e",".","g",".","relu","=",">","tf",".","nn",".","relu","."],"function":"def get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)","function_tokens":["def","get_activation","(","activation_string",")",":","# We assume that anything that\"s not a string is already an activation","# function, so we just return it.","if","not","isinstance","(","activation_string",",","six",".","string_types",")",":","return","activation_string","if","not","activation_string",":","return","None","act","=","activation_string",".","lower","(",")","if","act","==","\"linear\"",":","return","None","elif","act","==","\"relu\"",":","return","tf",".","nn",".","relu","elif","act","==","\"gelu\"",":","return","gelu","elif","act","==","\"tanh\"",":","return","tf",".","tanh","else",":","raise","ValueError","(","\"Unsupported activation: %s\"","%","act",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L280-L314"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"get_assignment_map_from_checkpoint","parameters":"(tvars, init_checkpoint)","argument_list":"","return_statement":"return (assignment_map, initialized_variable_names)","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the 
union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)","function_tokens":["def","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","assignment_map","=","{","}","initialized_variable_names","=","{","}","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","if","name","not","in","name_to_variable",":","continue","assignment_map","[","name","]","=","name","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","return","(","assignment_map",",","initialized_variable_names",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L317-L341"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"dropout","parameters":"(input_tensor, dropout_prob)","argument_list":"","return_statement":"return output","docstring":"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.","docstring_summary":"Perform dropout.","docstring_tokens":["Perform","dropout","."],"function":"def dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\n return output","function_tokens":["def","dropout","(","input_tensor",",","dropout_prob",")",":","if","dropout_prob","is","None","or","dropout_prob","==","0.0",":","return","input_tensor","output","=","tf",".","nn",".","dropout","(","input_tensor",",","1.0","-","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L344-L359"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"layer_norm","parameters":"(input_tensor, name=None)","argument_list":"","return_statement":"return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","docstring":"Run layer normalization on the last dimension of the tensor.","docstring_summary":"Run layer normalization on the last dimension of the tensor.","docstring_tokens":["Run","layer","normalization","on","the","last","dimension","of","the","tensor","."],"function":"def layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","function_tokens":["def","layer_norm","(","input_tensor",",","name","=","None",")",":","return","tf",".","contrib",".","layers",".","layer_norm","(","inputs","=","input_tensor",",","begin_norm_axis","=","-","1",",","begin_params_axis","=","-","1",",","scope","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L362-L365"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"layer_norm_and_dropout","parameters":"(input_tensor, dropout_prob, name=None)","argument_list":"","return_statement":"return output_tensor","docstring":"Runs layer normalization followed by dropout.","docstring_summary":"Runs layer normalization followed by dropout.","docstring_tokens":["Runs","layer","normalization","followed","by","dropout","."],"function":"def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor","function_tokens":["def","layer_norm_and_dropout","(","input_tensor",",","dropout_prob",",","name","=","None",")",":","output_tensor","=","layer_norm","(","input_tensor",",","name",")","output_tensor","=","dropout","(","output_tensor",",","dropout_prob",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L368-L372"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"create_initializer","parameters":"(initializer_range=0.02)","argument_list":"","return_statement":"return 
tf.truncated_normal_initializer(stddev=initializer_range)","docstring":"Creates a `truncated_normal_initializer` with the given range.","docstring_summary":"Creates a `truncated_normal_initializer` with the given range.","docstring_tokens":["Creates","a","truncated_normal_initializer","with","the","given","range","."],"function":"def create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)","function_tokens":["def","create_initializer","(","initializer_range","=","0.02",")",":","return","tf",".","truncated_normal_initializer","(","stddev","=","initializer_range",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L375-L377"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"embedding_lookup","parameters":"(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False)","argument_list":"","return_statement":"return (output, embedding_table)","docstring":"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].","docstring_summary":"Looks up words embeddings for id tensor.","docstring_tokens":["Looks","up","words","embeddings","for","id","tensor","."],"function":"def embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. 
If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.get_variable(\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n output = tf.matmul(one_hot_input_ids, embedding_table)\n else:\n output = tf.gather(embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * embedding_size])\n return (output, embedding_table)","function_tokens":["def","embedding_lookup","(","input_ids",",","vocab_size",",","embedding_size","=","128",",","initializer_range","=","0.02",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","False",")",":","# This function assumes that the input is of shape [batch_size, seq_length,","# num_inputs].","#","# If the input is a 2D tensor of shape [batch_size, seq_length], we","# reshape to [batch_size, seq_length, 1].","if","input_ids",".","shape",".","ndims","==","2",":","input_ids","=","tf",".","expand_dims","(","input_ids",",","axis","=","[","-","1","]",")","embedding_table","=","tf",".","get_variable","(","name","=","word_embedding_name",",","shape","=","[","vocab_size",",","embedding_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","flat_input_ids","=","tf",".","reshape","(","input_ids",",","[","-","1","]",")","if","use_one_hot_embeddings",":","one_hot_input_ids","=","tf",".","one_hot","(","flat_input_ids",",","depth","=","vocab_size",")","output","=","tf",".","matmul","(","one_hot_input_ids",",","embedding_table",")","else",":","output","=","tf",".","gather","(","embedding_table",",","flat_input_ids",")","input_shape","=","get_shape_list","(","input_ids",")","output","=","tf",".","reshape","(","output",",","input_shape","[","0",":","-","1","]","+","[","input_shape","[","-","1","]","*","embedding_size","]",")","return","(","output",",","embedding_table",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L380-L425"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"embedding_postprocessor","parameters":"(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1)","argument_list":"","return_statement":"return output","docstring":"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. 
Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.","docstring_summary":"Performs various post-processing on a word embedding tensor.","docstring_tokens":["Performs","various","post","-","processing","on","a","word","embedding","tensor","."],"function":"def embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n\n output = layer_norm_and_dropout(output, dropout_prob)\n return output","function_tokens":["def","embedding_postprocessor","(","input_tensor",",","use_token_type","=","False",",","token_type_ids","=","None",",","token_type_vocab_size","=","16",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","0.02",",","max_position_embeddings","=","512",",","dropout_prob","=","0.1",")",":","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","width","=","input_shape","[","2","]","output","=","input_tensor","if","use_token_type",":","if","token_type_ids","is","None",":","raise","ValueError","(","\"`token_type_ids` must be specified if\"","\"`use_token_type` is True.\"",")","token_type_table","=","tf",".","get_variable","(","name","=","token_type_embedding_name",",","shape","=","[","token_type_vocab_size",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# This vocab will be small so we always do one-hot here, since it is always","# faster for a small vocabulary.","flat_token_type_ids","=","tf",".","reshape","(","token_type_ids",",","[","-","1","]",")","one_hot_ids","=","tf",".","one_hot","(","flat_token_type_ids",",","depth","=","token_type_vocab_size",")","token_type_embeddings","=","tf",".","matmul","(","one_hot_ids",",","token_type_table",")","token_type_embeddings","=","tf",".","reshape","(","token_type_embeddings",",","[","batch_size",",","seq_length",",","width","]",")","output","+=","token_type_embeddings","if","use_position_embeddings",":","assert_op","=","tf",".","assert_less_equal","(","seq_length",",","max_position_embeddings",")","with","tf",".","control_dependencies","(","[","assert_op","]",")",":","full_position_embeddings","=","tf",".","get_variable","(","name","=","position_embedding_name",",","shape","=","[","max_position_embeddings",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# Since the position embedding table is a learned variable, we create it","# using a (long) sequence length `max_position_embeddings`. The actual","# sequence length might be shorter than this, for faster training of","# tasks that do not have long sequences.","#","# So `full_position_embeddings` is effectively an embedding table","# for position [0, 1, 2, ..., max_position_embeddings-1], and the current","# sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just","# perform a slice.","position_embeddings","=","tf",".","slice","(","full_position_embeddings",",","[","0",",","0","]",",","[","seq_length",",","-","1","]",")","num_dims","=","len","(","output",".","shape",".","as_list","(",")",")","# Only the last two dimensions are relevant (`seq_length` and `width`), so","# we broadcast among the first dimensions, which is typically just","# the batch size.","position_broadcast_shape","=","[","]","for","_","in","range","(","num_dims","-","2",")",":","position_broadcast_shape",".","append","(","1",")","position_broadcast_shape",".","extend","(","[","seq_length",",","width","]",")","position_embeddings","=","tf",".","reshape","(","position_embeddings",",","position_broadcast_shape",")","output","+=","position_embeddings","output","=","layer_norm_and_dropout","(","output",",","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L428-L521"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"create_attention_mask_from_input_mask","parameters":"(from_tensor, to_mask)","argument_list":"","return_statement":"return mask","docstring":"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].","docstring_summary":"Create 3D attention mask from a 2D tensor mask.","docstring_tokens":["Create","3D","attention","mask","from","a","2D","tensor","mask","."],"function":"def create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n  # don't actually care if we attend *from* padding tokens (only *to* padding)\n  # tokens so we create a tensor of all ones.\n  #\n  # `broadcast_ones` = [batch_size, from_seq_length, 1]\n  broadcast_ones = tf.ones(\n      shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n  # Here we broadcast along two dimensions to create the mask.\n  mask = broadcast_ones * to_mask\n\n  return mask","function_tokens":["def","create_attention_mask_from_input_mask","(","from_tensor",",","to_mask",")",":","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_shape","=","get_shape_list","(","to_mask",",","expected_rank","=","2",")","to_seq_length","=","to_shape","[","1","]","to_mask","=","tf",".","cast","(","tf",".","reshape","(","to_mask",",","[","batch_size",",","1",",","to_seq_length","]",")",",","tf",".","float32",")","# We don't assume that `from_tensor` is a mask (although it could be). We","# don't actually care if we attend *from* padding tokens (only *to* padding)","# tokens so we create a tensor of all ones.","#","# `broadcast_ones` = [batch_size, from_seq_length, 1]","broadcast_ones","=","tf",".","ones","(","shape","=","[","batch_size",",","from_seq_length",",","1","]",",","dtype","=","tf",".","float32",")","# Here we broadcast along two dimensions to create the mask.","mask","=","broadcast_ones","*","to_mask","return","mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L524-L555"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"attention_layer","parameters":"(from_tensor,\n                    to_tensor,\n                    attention_mask=None,\n                    num_attention_heads=1,\n                    size_per_head=512,\n                    query_act=None,\n                    key_act=None,\n                    value_act=None,\n                    attention_probs_dropout_prob=0.0,\n                    initializer_range=0.02,\n                    do_return_2d_tensor=False,\n                    batch_size=None,\n                    from_seq_length=None,\n                    to_seq_length=None)","argument_list":"","return_statement":"return context_layer","docstring":"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n  This is an implementation of multi-headed attention based on \"Attention\n  Is All You Need\". If `from_tensor` and `to_tensor` are the same, then\n  this is self-attention. Each timestep in `from_tensor` attends to the\n  corresponding sequence in `to_tensor`, and returns a fixed-width vector.\n\n  This function first projects `from_tensor` into a \"query\" tensor and\n  `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n  of tensors of length `num_attention_heads`, where each tensor is of shape\n  [batch_size, seq_length, size_per_head].\n\n  Then, the query and key tensors are dot-producted and scaled. These are\n  softmaxed to obtain attention probabilities. The value tensors are then\n  interpolated by these probabilities, then concatenated back to a single\n  tensor and returned.\n\n  In practice, the multi-headed attention is done with transposes and\n  reshapes rather than actual separate tensors.\n\n  Args:\n    from_tensor: float Tensor of shape [batch_size, from_seq_length,\n      from_width].\n    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n    attention_mask: (optional) int32 Tensor of shape [batch_size,\n      from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n      attention scores will effectively be set to -infinity for any positions in\n      the mask that are 0, and will be unchanged for positions that are 1.\n    num_attention_heads: int. Number of attention heads.\n    size_per_head: int. Size of each attention head.\n    query_act: (optional) Activation function for the query transform.\n    key_act: (optional) Activation function for the key transform.\n    value_act: (optional) Activation function for the value transform.\n    attention_probs_dropout_prob: (optional) float. Dropout probability of the\n      attention probabilities.\n    initializer_range: float. Range of the weight initializer.\n    do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n      * from_seq_length, num_attention_heads * size_per_head]. If False, the\n      output will be of shape [batch_size, from_seq_length, num_attention_heads\n      * size_per_head].\n    batch_size: (Optional) int. If the input is 2D, this might be the batch size\n      of the 3D version of the `from_tensor` and `to_tensor`.\n    from_seq_length: (Optional) If the input is 2D, this might be the seq length\n      of the 3D version of the `from_tensor`.\n    to_seq_length: (Optional) If the input is 2D, this might be the seq length\n      of the 3D version of the `to_tensor`.\n\n  Returns:\n    float Tensor of shape [batch_size, from_seq_length,\n      num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n      true, this will be of shape [batch_size * from_seq_length,\n      num_attention_heads * size_per_head]).\n\n  Raises:\n    ValueError: Any of the arguments or tensor shapes are invalid.","docstring_summary":"Performs multi-headed attention from `from_tensor` to `to_tensor`.","docstring_tokens":["Performs","multi","-","headed","attention","from","from_tensor","to","to_tensor","."],"function":"def attention_layer(from_tensor,\n                    to_tensor,\n                    attention_mask=None,\n                    num_attention_heads=1,\n                    size_per_head=512,\n                    query_act=None,\n                    key_act=None,\n                    value_act=None,\n                    attention_probs_dropout_prob=0.0,\n                    initializer_range=0.02,\n                    do_return_2d_tensor=False,\n                    batch_size=None,\n                    from_seq_length=None,\n                    to_seq_length=None):\n  \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n  This is an implementation of multi-headed attention based on \"Attention\n  Is All You Need\". If `from_tensor` and `to_tensor` are the same, then\n  this is self-attention. Each timestep in `from_tensor` attends to the\n  corresponding sequence in `to_tensor`, and returns a fixed-width vector.\n\n  This function first projects `from_tensor` into a \"query\" tensor and\n  `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n  of tensors of length `num_attention_heads`, where each tensor is of shape\n  [batch_size, seq_length, size_per_head].\n\n  Then, the query and key tensors are dot-producted and scaled. These are\n  softmaxed to obtain attention probabilities. The value tensors are then\n  interpolated by these probabilities, then concatenated back to a single\n  tensor and returned.\n\n  In practice, the multi-headed attention is done with transposes and\n  reshapes rather than actual separate tensors.\n\n  Args:\n    from_tensor: float Tensor of shape [batch_size, from_seq_length,\n      from_width].\n    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n    attention_mask: (optional) int32 Tensor of shape [batch_size,\n      from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = 
transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 \/ math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer","function_tokens":["def","attention_layer","(","from_tensor",",","to_tensor",",","attention_mask","=","None",",","num_attention_heads","=","1",",","size_per_head","=","512",",","query_act","=","None",",","key_act","=","None",",","value_act","=","None",",","attention_probs_dropout_prob","=","0.0",",","initializer_range","=","0.02",",","do_return_2d_tensor","=","False",",","batch_size","=","None",",","from_seq_length","=","None",",","to_seq_length","=","None",")",":","def","transpose_for_scores","(","input_tensor",",","batch_size",",","num_attention_heads",",","seq_length",",","width",")",":","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","batch_size",",","seq_length",",","num_attention_heads",",","width","]",")","output_tensor","=","tf",".","transpose","(","output_tensor",",","[","0",",","2",",","1",",","3","]",")","return","output_tensor","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","to_shape","=","get_shape_list","(","to_tensor",",","expected_rank","=","[","2",",","3","]",")","if","len","(","from_shape",")","!=","len","(","to_shape",")",":","raise","ValueError","(","\"The rank of `from_tensor` must match the rank of 
`to_tensor`.\"",")","if","len","(","from_shape",")","==","3",":","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_seq_length","=","to_shape","[","1","]","elif","len","(","from_shape",")","==","2",":","if","(","batch_size","is","None","or","from_seq_length","is","None","or","to_seq_length","is","None",")",":","raise","ValueError","(","\"When passing in rank 2 tensors to attention_layer, the values \"","\"for `batch_size`, `from_seq_length`, and `to_seq_length` \"","\"must all be specified.\"",")","# Scalar dimensions referenced here:","# B = batch size (number of sequences)","# F = `from_tensor` sequence length","# T = `to_tensor` sequence length","# N = `num_attention_heads`","# H = `size_per_head`","from_tensor_2d","=","reshape_to_matrix","(","from_tensor",")","to_tensor_2d","=","reshape_to_matrix","(","to_tensor",")","# `query_layer` = [B*F, N*H]","query_layer","=","tf",".","layers",".","dense","(","from_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","query_act",",","name","=","\"query\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `key_layer` = [B*T, N*H]","key_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","key_act",",","name","=","\"key\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `value_layer` = [B*T, N*H]","value_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","value_act",",","name","=","\"value\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `query_layer` = [B, N, F, H]","query_layer","=","transpose_for_scores","(","query_layer",",","batch_size",",","num_attention_heads",",","from_seq_length",",","size_per_head",")","# `key_layer` = [B, N, T, H]","key_layer","=","transpose_for_scores","(","key_layer",",","batch_size",",","num_attention_heads",",","to_seq_length",",","size_per_head",")","# Take the dot product between \"query\" and \"key\" to get the raw","# attention scores.","# `attention_scores` = [B, N, F, T]","attention_scores","=","tf",".","matmul","(","query_layer",",","key_layer",",","transpose_b","=","True",")","attention_scores","=","tf",".","multiply","(","attention_scores",",","1.0","\/","math",".","sqrt","(","float","(","size_per_head",")",")",")","if","attention_mask","is","not","None",":","# `attention_mask` = [B, 1, F, T]","attention_mask","=","tf",".","expand_dims","(","attention_mask",",","axis","=","[","1","]",")","# Since attention_mask is 1.0 for positions we want to attend and 0.0 for","# masked positions, this operation will create a tensor which is 0.0 for","# positions we want to attend and -10000.0 for masked positions.","adder","=","(","1.0","-","tf",".","cast","(","attention_mask",",","tf",".","float32",")",")","*","-","10000.0","# Since we are adding it to the raw scores before the softmax, this is","# effectively the same as removing these entirely.","attention_scores","+=","adder","# Normalize the attention scores to probabilities.","# `attention_probs` = [B, N, F, T]","attention_probs","=","tf",".","nn",".","softmax","(","attention_scores",")","# This is actually dropping out entire tokens to attend to, which might","# seem a bit unusual, but is taken from the original Transformer paper.","attention_probs","=","dropout","(","attention_probs",",","attention_probs_dropout_prob",")","# `value_layer` = 
[B, T, N, H]","value_layer","=","tf",".","reshape","(","value_layer",",","[","batch_size",",","to_seq_length",",","num_attention_heads",",","size_per_head","]",")","# `value_layer` = [B, N, T, H]","value_layer","=","tf",".","transpose","(","value_layer",",","[","0",",","2",",","1",",","3","]",")","# `context_layer` = [B, N, F, H]","context_layer","=","tf",".","matmul","(","attention_probs",",","value_layer",")","# `context_layer` = [B, F, N, H]","context_layer","=","tf",".","transpose","(","context_layer",",","[","0",",","2",",","1",",","3","]",")","if","do_return_2d_tensor",":","# `context_layer` = [B*F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size","*","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","else",":","# `context_layer` = [B, F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size",",","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","return","context_layer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L558-L751"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"transformer_model","parameters":"(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False)","argument_list":"","return_statement":"","docstring":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. 
Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.","docstring_summary":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".","docstring_tokens":["Multi","-","headed","multi","-","layer","Transformer","from","Attention","is","All","You","Need","."],"function":"def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size \/ num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on\n # the GPU\/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output","function_tokens":["def","transformer_model","(","input_tensor",",","attention_mask","=","None",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","intermediate_act_fn","=","gelu",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","initializer_range","=","0.02",",","do_return_all_layers","=","False",")",":","if","hidden_size","%","num_attention_heads","!=","0",":","raise","ValueError","(","\"The hidden size (%d) is not a multiple of the number of attention \"","\"heads 
(%d)\"","%","(","hidden_size",",","num_attention_heads",")",")","attention_head_size","=","int","(","hidden_size","\/","num_attention_heads",")","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","input_width","=","input_shape","[","2","]","# The Transformer performs sum residuals on all layers so the input needs","# to be the same as the hidden size.","if","input_width","!=","hidden_size",":","raise","ValueError","(","\"The width of the input tensor (%d) != hidden size (%d)\"","%","(","input_width",",","hidden_size",")",")","# We keep the representation as a 2D tensor to avoid re-shaping it back and","# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on","# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to","# help the optimizer.","prev_output","=","reshape_to_matrix","(","input_tensor",")","all_layer_outputs","=","[","]","for","layer_idx","in","range","(","num_hidden_layers",")",":","with","tf",".","variable_scope","(","\"layer_%d\"","%","layer_idx",")",":","layer_input","=","prev_output","with","tf",".","variable_scope","(","\"attention\"",")",":","attention_heads","=","[","]","with","tf",".","variable_scope","(","\"self\"",")",":","attention_head","=","attention_layer","(","from_tensor","=","layer_input",",","to_tensor","=","layer_input",",","attention_mask","=","attention_mask",",","num_attention_heads","=","num_attention_heads",",","size_per_head","=","attention_head_size",",","attention_probs_dropout_prob","=","attention_probs_dropout_prob",",","initializer_range","=","initializer_range",",","do_return_2d_tensor","=","True",",","batch_size","=","batch_size",",","from_seq_length","=","seq_length",",","to_seq_length","=","seq_length",")","attention_heads",".","append","(","attention_head",")","attention_output","=","None","if","len","(","attention_heads",")","==","1",":","attention_output","=","attention_heads","[","0","]","else",":","# In the case where we have other sequences, we just concatenate","# them to the self-attention head before the projection.","attention_output","=","tf",".","concat","(","attention_heads",",","axis","=","-","1",")","# Run a linear projection of `hidden_size` then add a residual","# with `layer_input`.","with","tf",".","variable_scope","(","\"output\"",")",":","attention_output","=","tf",".","layers",".","dense","(","attention_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","attention_output","=","dropout","(","attention_output",",","hidden_dropout_prob",")","attention_output","=","layer_norm","(","attention_output","+","layer_input",")","# The activation is only applied to the \"intermediate\" hidden layer.","with","tf",".","variable_scope","(","\"intermediate\"",")",":","intermediate_output","=","tf",".","layers",".","dense","(","attention_output",",","intermediate_size",",","activation","=","intermediate_act_fn",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# Down-project back to `hidden_size` then add the 
residual.","with","tf",".","variable_scope","(","\"output\"",")",":","layer_output","=","tf",".","layers",".","dense","(","intermediate_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","layer_output","=","dropout","(","layer_output",",","hidden_dropout_prob",")","layer_output","=","layer_norm","(","layer_output","+","attention_output",")","prev_output","=","layer_output","all_layer_outputs",".","append","(","layer_output",")","if","do_return_all_layers",":","final_outputs","=","[","]","for","layer_output","in","all_layer_outputs",":","final_output","=","reshape_from_matrix","(","layer_output",",","input_shape",")","final_outputs",".","append","(","final_output",")","return","final_outputs","else",":","final_output","=","reshape_from_matrix","(","prev_output",",","input_shape",")","return","final_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L754-L892"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"get_shape_list","parameters":"(tensor, expected_rank=None, name=None)","argument_list":"","return_statement":"return shape","docstring":"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.","docstring_summary":"Returns a list of the shape of tensor, preferring static dimensions.","docstring_tokens":["Returns","a","list","of","the","shape","of","tensor","preferring","static","dimensions","."],"function":"def get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape","function_tokens":["def","get_shape_list","(","tensor",",","expected_rank","=","None",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","if","expected_rank","is","not","None",":","assert_rank","(","tensor",",","expected_rank",",","name",")","shape","=","tensor",".","shape",".","as_list","(",")","non_static_indexes","=","[","]","for","(","index",",","dim",")","in","enumerate","(","shape",")",":","if","dim","is","None",":","non_static_indexes",".","append","(","index",")","if","not","non_static_indexes",":","return","shape","dyn_shape","=","tf",".","shape","(","tensor",")","for","index","in","non_static_indexes",":","shape","[","index","]","=","dyn_shape","[","index","]","return","shape"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L895-L929"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"reshape_to_matrix","parameters":"(input_tensor)","argument_list":"","return_statement":"return output_tensor","docstring":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_summary":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_tokens":["Reshapes","a",">","=","rank","2","tensor","to","a","rank","2","tensor","(","i",".","e",".","a","matrix",")","."],"function":"def reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor","function_tokens":["def","reshape_to_matrix","(","input_tensor",")",":","ndims","=","input_tensor",".","shape",".","ndims","if","ndims","<","2",":","raise","ValueError","(","\"Input tensor must have at least rank 2. 
Shape = %s\"","%","(","input_tensor",".","shape",")",")","if","ndims","==","2",":","return","input_tensor","width","=","input_tensor",".","shape","[","-","1","]","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","-","1",",","width","]",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L932-L943"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"reshape_from_matrix","parameters":"(output_tensor, orig_shape_list)","argument_list":"","return_statement":"return tf.reshape(output_tensor, orig_dims + [width])","docstring":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_summary":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_tokens":["Reshapes","a","rank","2","tensor","back","to","its","original","rank",">","=","2","tensor","."],"function":"def reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])","function_tokens":["def","reshape_from_matrix","(","output_tensor",",","orig_shape_list",")",":","if","len","(","orig_shape_list",")","==","2",":","return","output_tensor","output_shape","=","get_shape_list","(","output_tensor",")","orig_dims","=","orig_shape_list","[","0",":","-","1","]","width","=","output_shape","[","-","1","]","return","tf",".","reshape","(","output_tensor",",","orig_dims","+","[","width","]",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L946-L956"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"assert_rank","parameters":"(tensor, expected_rank, name=None)","argument_list":"","return_statement":"","docstring":"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.","docstring_summary":"Raises an exception if the tensor rank is not of the expected rank.","docstring_tokens":["Raises","an","exception","if","the","tensor","rank","is","not","of","the","expected","rank","."],"function":"def assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = tf.get_variable_scope().name\n 
raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))","function_tokens":["def","assert_rank","(","tensor",",","expected_rank",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","expected_rank_dict","=","{","}","if","isinstance","(","expected_rank",",","six",".","integer_types",")",":","expected_rank_dict","[","expected_rank","]","=","True","else",":","for","x","in","expected_rank",":","expected_rank_dict","[","x","]","=","True","actual_rank","=","tensor",".","shape",".","ndims","if","actual_rank","not","in","expected_rank_dict",":","scope_name","=","tf",".","get_variable_scope","(",")",".","name","raise","ValueError","(","\"For the tensor `%s` in scope `%s`, the actual rank \"","\"`%d` (shape = %s) is not equal to the expected rank `%s`\"","%","(","name",",","scope_name",",","actual_rank",",","str","(","tensor",".","shape",")",",","str","(","expected_rank",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L959-L986"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"BertConfig.__init__","parameters":"(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range","function_tokens":["def","__init__","(","self",",","vocab_size",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","16",",","initializer_range","=","0.02",")",":","self",".","vocab_size","=","vocab_size","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L34-L80"} 
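The BertConfig.__init__ record above, together with the from_dict, from_json_file, to_dict, and to_json_string records that follow, describes a plain JSON-serializable configuration object. A minimal usage sketch of that round trip (the import path and the vocab_size value are illustrative assumptions, not part of the records):

import json
import modeling  # assumes baselines/models/bert/modeling.py is importable as `modeling`

config = modeling.BertConfig(
    vocab_size=21128,  # illustrative value; any vocabulary size works
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    max_position_embeddings=512,
    type_vocab_size=2)

# to_json_string() serializes via to_dict(); from_dict() rebuilds the config,
# so a BertConfig survives a JSON round trip unchanged.
restored = modeling.BertConfig.from_dict(json.loads(config.to_json_string()))
assert restored.to_dict() == config.to_dict()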
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"BertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size","=","None",")","for","(","key",",","value",")","in","six",".","iteritems","(","json_object",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L83-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"BertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","tf",".","gfile",".","GFile","(","json_file",",","\"r\"",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L91-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"BertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L97-L100"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"BertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L102-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"BertModel.__init__","parameters":"(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None)","argument_list":"","return_statement":"","docstring":"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.","docstring_summary":"Constructor for BertModel.","docstring_tokens":["Constructor","for","BertModel","."],"function":"def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (self.embedding_output, self.embedding_table) = embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n\n self.sequence_output = self.all_encoder_layers[-1]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.variable_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. 
We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = tf.layers.dense(\n first_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(config.initializer_range))","function_tokens":["def","__init__","(","self",",","config",",","is_training",",","input_ids",",","input_mask","=","None",",","token_type_ids","=","None",",","use_one_hot_embeddings","=","False",",","scope","=","None",")",":","config","=","copy",".","deepcopy","(","config",")","if","not","is_training",":","config",".","hidden_dropout_prob","=","0.0","config",".","attention_probs_dropout_prob","=","0.0","input_shape","=","get_shape_list","(","input_ids",",","expected_rank","=","2",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","if","input_mask","is","None",":","input_mask","=","tf",".","ones","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","if","token_type_ids","is","None",":","token_type_ids","=","tf",".","zeros","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","with","tf",".","variable_scope","(","scope",",","default_name","=","\"bert\"",")",":","with","tf",".","variable_scope","(","\"embeddings\"",")",":","# Perform embedding lookup on the word ids.","(","self",".","embedding_output",",","self",".","embedding_table",")","=","embedding_lookup","(","input_ids","=","input_ids",",","vocab_size","=","config",".","vocab_size",",","embedding_size","=","config",".","hidden_size",",","initializer_range","=","config",".","initializer_range",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# Add positional embeddings and token type embeddings, then layer","# normalize and perform dropout.","self",".","embedding_output","=","embedding_postprocessor","(","input_tensor","=","self",".","embedding_output",",","use_token_type","=","True",",","token_type_ids","=","token_type_ids",",","token_type_vocab_size","=","config",".","type_vocab_size",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","config",".","initializer_range",",","max_position_embeddings","=","config",".","max_position_embeddings",",","dropout_prob","=","config",".","hidden_dropout_prob",")","with","tf",".","variable_scope","(","\"encoder\"",")",":","# This converts a 2D mask of shape [batch_size, seq_length] to a 3D","# mask of shape [batch_size, seq_length, seq_length] which is used","# for the attention scores.","attention_mask","=","create_attention_mask_from_input_mask","(","input_ids",",","input_mask",")","# Run the stacked transformer.","# `sequence_output` shape = [batch_size, seq_length, 
hidden_size].","self",".","all_encoder_layers","=","transformer_model","(","input_tensor","=","self",".","embedding_output",",","attention_mask","=","attention_mask",",","hidden_size","=","config",".","hidden_size",",","num_hidden_layers","=","config",".","num_hidden_layers",",","num_attention_heads","=","config",".","num_attention_heads",",","intermediate_size","=","config",".","intermediate_size",",","intermediate_act_fn","=","get_activation","(","config",".","hidden_act",")",",","hidden_dropout_prob","=","config",".","hidden_dropout_prob",",","attention_probs_dropout_prob","=","config",".","attention_probs_dropout_prob",",","initializer_range","=","config",".","initializer_range",",","do_return_all_layers","=","True",")","self",".","sequence_output","=","self",".","all_encoder_layers","[","-","1","]","# The \"pooler\" converts the encoded sequence tensor of shape","# [batch_size, seq_length, hidden_size] to a tensor of shape","# [batch_size, hidden_size]. This is necessary for segment-level","# (or segment-pair-level) classification tasks where we need a fixed","# dimensional representation of the segment.","with","tf",".","variable_scope","(","\"pooler\"",")",":","# We \"pool\" the model by simply taking the hidden state corresponding","# to the first token. We assume that this has been pre-trained","first_token_tensor","=","tf",".","squeeze","(","self",".","sequence_output","[",":",",","0",":","1",",",":","]",",","axis","=","1",")","self",".","pooled_output","=","tf",".","layers",".","dense","(","first_token_tensor",",","config",".","hidden_size",",","activation","=","tf",".","tanh",",","kernel_initializer","=","create_initializer","(","config",".","initializer_range",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L131-L232"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"BertModel.get_sequence_output","parameters":"(self)","argument_list":"","return_statement":"return self.sequence_output","docstring":"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.","docstring_summary":"Gets final hidden layer of encoder.","docstring_tokens":["Gets","final","hidden","layer","of","encoder","."],"function":"def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output","function_tokens":["def","get_sequence_output","(","self",")",":","return","self",".","sequence_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L237-L244"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/modeling.py","language":"python","identifier":"BertModel.get_embedding_output","parameters":"(self)","argument_list":"","return_statement":"return self.embedding_output","docstring":"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type 
embeddings,\n then performing layer normalization. This is the input to the transformer.","docstring_summary":"Gets output of the embedding lookup (i.e., input to the transformer).","docstring_tokens":["Gets","output","of","the","embedding","lookup","(","i",".","e",".","input","to","the","transformer",")","."],"function":"def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output","function_tokens":["def","get_embedding_output","(","self",")",":","return","self",".","embedding_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/modeling.py#L249-L258"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"convert_example_list_for_inews","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature_list","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_example_list_for_inews(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return [InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)]\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n must_len = len(tokens_a) + 3\n extra_len = max_seq_length - must_len\n feature_list = []\n if example.text_b and extra_len > 0:\n extra_num = int((len(tokens_b) - 1) \/ extra_len) + 1\n for num in range(extra_num):\n max_len = min((num + 1) * extra_len, len(tokens_b))\n tokens_b_sub = tokens_b[num * extra_len: max_len]\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b_sub, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n else:\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n return 
feature_list","function_tokens":["def","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","[","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","]","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","must_len","=","len","(","tokens_a",")","+","3","extra_len","=","max_seq_length","-","must_len","feature_list","=","[","]","if","example",".","text_b","and","extra_len",">","0",":","extra_num","=","int","(","(","len","(","tokens_b",")","-","1",")","\/","extra_len",")","+","1","for","num","in","range","(","extra_num",")",":","max_len","=","min","(","(","num","+","1",")","*","extra_len",",","len","(","tokens_b",")",")","tokens_b_sub","=","tokens_b","[","num","*","extra_len",":","max_len","]","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b_sub",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","else",":","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","return","feature_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L234-L269"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features_for_inews","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features_for_inews(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n num_example = 0\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature_list = convert_example_list_for_inews(ex_index, example, label_list,\n max_seq_length, tokenizer)\n num_example += len(feature_list)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n for feature in feature_list:\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n 
features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"feature num: %s\", num_example)\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features_for_inews","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","num_example","=","0","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","1000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature_list","=","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","num_example","+=","len","(","feature_list",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","for","feature","in","feature_list",":","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","tf",".","logging",".","info","(","\"feature num: %s\"",",","num_example",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L272-L302"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"convert_single_example","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # 
length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return 
feature","function_tokens":["def","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","max_seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","max_seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","label_id","=","label_map","[","example",".","label","]","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","tf",".","logging",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label_id",")",")","feature","=","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",",","is_real_example","=","True",")","return","feature"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L305-L404"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n 
writer.close()","function_tokens":["def","file_based_convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L407-L434"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"file_based_input_fn_builder","parameters":"(input_file, seq_length, is_training,\n drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = 
tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","file_based_input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"label_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"is_real_example\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","}","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L437-L482"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
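Editor's note: the closure returned by file_based_input_fn_builder is handed to the estimator, which injects batch_size through params. A sketch consuming the TFRecord written in the previous example; the estimator object and step count are assumptions.

train_input_fn = file_based_input_fn_builder(
    input_file="train.tf_record",
    seq_length=128,
    is_training=True,
    drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=1000)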
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L485-L499"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, 
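Editor's note: a short trace of _truncate_seq_pair with assumed token lists makes the one-sided truncation visible.

tokens_a = ["a1", "a2", "a3"]
tokens_b = ["b1", "b2", "b3", "b4", "b5", "b6", "b7"]
_truncate_seq_pair(tokens_a, tokens_b, max_length=8)
# Only the longer list shrinks: tokens_b loses its last two tokens,
# leaving len(tokens_a) + len(tokens_b) == 3 + 5 == 8.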
probabilities)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use model.get_sequence_output()","# instead.","output_layer","=","model",".","get_pooled_output","(",")","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L502-L544"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in 
features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","num_labels",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = 
%s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_real_example","=","None","if","\"is_real_example\"","in","features",":","is_real_example","=","tf",".","cast","(","features","[","\"is_real_example\"","]",",","dtype","=","tf",".","float32",")","else",":","is_real_example","=","tf",".","ones","(","tf",".","shape","(","label_ids",")",",","dtype","=","tf",".","float32",")","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",",","is_real_example",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","label_ids",",","predictions","=","predictions",",","weights","=","is_real_example",")","loss","=","tf",".","metrics",".","mean","(","values","=","per_example_loss",",","weights","=","is_real_example",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits",",","is_real_example","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L547-L636"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",",","is_training",",","drop_remainder",")",":","all_input_ids","=","[","]","all_input_mask","=","[","]","all_segment_ids","=","[","]","all_label_ids","=","[","]","for","feature","in","features",":","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_segment_ids",".","append","(","feature",".","segment_ids",")","all_label_ids",".","append","(","feature",".","label_id",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"segment_ids\"",":","tf",".","constant","(","all_segment_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"label_ids\"",":","tf",".","constant","(","all_label_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","}",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L641-L690"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return features","docstring":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_summary":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","list","of","InputFeatures","."],"function":"def convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 
0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","features",".","append","(","feature",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier.py#L695-L708"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/optimization.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","AdamWeightDecayOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/optimization.py#L25-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/optimization.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) 
+ tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/optimization.py#L108-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/optimization.py#L159-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/optimization.py#L169-L174"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/create_pretraining_data.py","language":"python","identifier":"write_instance_to_example_files","parameters":"(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, 
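The update rule in `apply_gradients` is decoupled (AdamW-style) weight decay: the decay term is added to the normalized Adam update rather than folded into the gradient, and notably there is no bias correction of `m` and `v`. A minimal NumPy sketch of one step, with the regex-based exclusion check from `_do_use_weight_decay` folded in (function and variable names here are hypothetical):

```python
import re
import numpy as np

def adamw_step(param, grad, m, v, name, lr=1e-3, weight_decay=0.01,
               b1=0.9, b2=0.999, eps=1e-6,
               exclude=("LayerNorm", "layer_norm", "bias")):
    # Standard Adam moment estimates (no bias correction, as in the source).
    m = b1 * m + (1.0 - b1) * grad
    v = b2 * v + (1.0 - b2) * np.square(grad)
    update = m / (np.sqrt(v) + eps)
    # Decoupled weight decay: acts on the raw parameter, outside m and v,
    # and is skipped for excluded variables (LayerNorm params, biases).
    if weight_decay and not any(re.search(r, name) for r in exclude):
        update += weight_decay * param
    return param - lr * update, m, v

p, g = np.ones(3), 0.1 * np.ones(3)
p, m, v = adamw_step(p, g, np.zeros(3), np.zeros(3), name="dense/kernel")
```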
output_files)","argument_list":"","return_statement":"","docstring":"Create TF example files from `TrainingInstance`s.","docstring_summary":"Create TF example files from `TrainingInstance`s.","docstring_tokens":["Create","TF","example","files","from","TrainingInstance","s","."],"function":"def write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", 
total_written)","function_tokens":["def","write_instance_to_example_files","(","instances",",","tokenizer",",","max_seq_length",",","max_predictions_per_seq",",","output_files",")",":","writers","=","[","]","for","output_file","in","output_files",":","writers",".","append","(","tf",".","python_io",".","TFRecordWriter","(","output_file",")",")","writer_index","=","0","total_written","=","0","for","(","inst_index",",","instance",")","in","enumerate","(","instances",")",":","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","tokens",")","input_mask","=","[","1","]","*","len","(","input_ids",")","segment_ids","=","list","(","instance",".","segment_ids",")","assert","len","(","input_ids",")","<=","max_seq_length","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","masked_lm_positions","=","list","(","instance",".","masked_lm_positions",")","masked_lm_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","masked_lm_labels",")","masked_lm_weights","=","[","1.0","]","*","len","(","masked_lm_ids",")","while","len","(","masked_lm_positions",")","<","max_predictions_per_seq",":","masked_lm_positions",".","append","(","0",")","masked_lm_ids",".","append","(","0",")","masked_lm_weights",".","append","(","0.0",")","next_sentence_label","=","1","if","instance",".","is_random_next","else","0","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","segment_ids",")","features","[","\"masked_lm_positions\"","]","=","create_int_feature","(","masked_lm_positions",")","features","[","\"masked_lm_ids\"","]","=","create_int_feature","(","masked_lm_ids",")","features","[","\"masked_lm_weights\"","]","=","create_float_feature","(","masked_lm_weights",")","features","[","\"next_sentence_labels\"","]","=","create_int_feature","(","[","next_sentence_label","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writers","[","writer_index","]",".","write","(","tf_example",".","SerializeToString","(",")",")","writer_index","=","(","writer_index","+","1",")","%","len","(","writers",")","total_written","+=","1","if","inst_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","instance",".","tokens","]",")",")","for","feature_name","in","features",".","keys","(",")",":","feature","=","features","[","feature_name","]","values","=","[","]","if","feature",".","int64_list",".","value",":","values","=","feature",".","int64_list",".","value","elif","feature",".","float_list",".","value",":","values","=","feature",".","float_list",".","value","tf",".","logging",".","info","(","\"%s: %s\"","%","(","feature_name",",","\" \"",".","join","(","[","str","(","x",")","for","x","in","values","]",")",")",")","for","writer","in","writers",":","writer",".","close","(",")","tf",".","logging",".","info","(","\"Wrote %d total 
instances\"",",","total_written",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/create_pretraining_data.py#L96-L166"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/create_pretraining_data.py","language":"python","identifier":"create_training_instances","parameters":"(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng)","argument_list":"","return_statement":"return instances","docstring":"Create `TrainingInstance`s from raw text.","docstring_summary":"Create `TrainingInstance`s from raw text.","docstring_tokens":["Create","TrainingInstance","s","from","raw","text","."],"function":"def create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng))\n\n rng.shuffle(instances)\n return instances","function_tokens":["def","create_training_instances","(","input_files",",","tokenizer",",","max_seq_length",",","dupe_factor",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","rng",")",":","all_documents","=","[","[","]","]","# Input file format:","# (1) One sentence per line. These should ideally be actual sentences, not","# entire paragraphs or arbitrary spans of text. (Because we use the","# sentence boundaries for the \"next sentence prediction\" task).","# (2) Blank lines between documents. 
Document boundaries are needed so","# that the \"next sentence prediction\" task doesn't span between documents.","for","input_file","in","input_files",":","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","# Empty lines are used as document delimiters","if","not","line",":","all_documents",".","append","(","[","]",")","tokens","=","tokenizer",".","tokenize","(","line",")","if","tokens",":","all_documents","[","-","1","]",".","append","(","tokens",")","# Remove empty documents","all_documents","=","[","x","for","x","in","all_documents","if","x","]","rng",".","shuffle","(","all_documents",")","vocab_words","=","list","(","tokenizer",".","vocab",".","keys","(",")",")","instances","=","[","]","for","_","in","range","(","dupe_factor",")",":","for","document_index","in","range","(","len","(","all_documents",")",")",":","instances",".","extend","(","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",")","rng",".","shuffle","(","instances",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/create_pretraining_data.py#L179-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document","parameters":"(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. 
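The input-file convention that `create_training_instances` expects (one sentence per line, blank lines as document delimiters) can be illustrated without TensorFlow. This sketch mirrors the accumulation loop, with whitespace splitting standing in for the WordPiece tokenizer (an assumption for brevity):

```python
raw = ("First sentence of document one.\n"
       "Second sentence of document one.\n"
       "\n"
       "Only sentence of document two.\n")
tokenize = str.split  # stand-in for tokenizer.tokenize

all_documents = [[]]
for line in raw.splitlines():
    line = line.strip()
    if not line:                 # blank line: start a new document
        all_documents.append([])
        continue
    tokens = tokenize(line)
    if tokens:
        all_documents[-1].append(tokens)

all_documents = [d for d in all_documents if d]  # drop empty documents
assert len(all_documents) == 2
```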
Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document):\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances","function_tokens":["def","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. 
However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","instances","=","[","]","current_chunk","=","[","]","current_length","=","0","i","=","0","while","i","<","len","(","document",")",":","segment","=","document","[","i","]","current_chunk",".","append","(","segment",")","current_length","+=","len","(","segment",")","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","if","current_chunk",":","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","tokens_b","=","[","]","# Random next","is_random_next","=","False","if","len","(","current_chunk",")","==","1","or","rng",".","random","(",")","<","0.5",":","is_random_next","=","True","target_b_length","=","target_seq_length","-","len","(","tokens_a",")","# This should rarely go for more than one iteration for large","# corpora. 
However, just to be careful, we try to make sure that","# the random document is not the same as the document","# we're processing.","for","_","in","range","(","10",")",":","random_document_index","=","rng",".","randint","(","0",",","len","(","all_documents",")","-","1",")","if","random_document_index","!=","document_index",":","break","random_document","=","all_documents","[","random_document_index","]","random_start","=","rng",".","randint","(","0",",","len","(","random_document",")","-","1",")","for","j","in","range","(","random_start",",","len","(","random_document",")",")",":","tokens_b",".","extend","(","random_document","[","j","]",")","if","len","(","tokens_b",")",">=","target_b_length",":","break","# We didn't actually use these segments so we \"put them back\" so","# they don't go to waste.","num_unused_segments","=","len","(","current_chunk",")","-","a_end","i","-=","num_unused_segments","# Actual next","else",":","is_random_next","=","False","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","current_length","=","0","i","+=","1","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/create_pretraining_data.py#L223-L335"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/create_pretraining_data.py","language":"python","identifier":"create_masked_lm_predictions","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original word. 
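The heart of `create_instances_from_document` is the A/B split: segment A takes a random prefix of the accumulated chunk, and segment B is either the true continuation or text from another document (50/50). Below is a simplified pure-Python sketch of that decision; the helper name and the random-document pick are simplifications (the source retries up to 10 times to avoid selecting the same document):

```python
import random
rng = random.Random(12345)

def split_ab(current_chunk, all_documents, document_index, target_b_length):
    # A = random prefix of the chunk (at least one segment).
    a_end = rng.randint(1, len(current_chunk) - 1) if len(current_chunk) >= 2 else 1
    tokens_a = [t for seg in current_chunk[:a_end] for t in seg]
    if len(current_chunk) == 1 or rng.random() < 0.5:
        # "Random next": B comes from a different document.
        is_random_next = True
        other = (document_index + 1) % len(all_documents)  # simplified pick
        tokens_b = [t for seg in all_documents[other] for t in seg][:target_b_length]
    else:
        # "Actual next": B is the true continuation of the chunk.
        is_random_next = False
        tokens_b = [t for seg in current_chunk[a_end:] for t in seg]
    return tokens_a, tokens_b, is_random_next
```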
When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. When a word has been split into","# WordPieces, the first token does not have any marker and any subsequence","# tokens are prefixed with ##. 
So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","output_tokens","=","list","(","tokens",")","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","masked_token","=","tokens","[","index","]","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/create_pretraining_data.py#L342-L415"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/create_pretraining_data.py","language":"python","identifier":"truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_num_tokens, rng)","argument_list":"","return_statement":"","docstring":"Truncates a pair of sequences to a maximum sequence length.","docstring_summary":"Truncates a pair of sequences to a maximum sequence length.","docstring_tokens":["Truncates","a","pair","of","sequences","to","a","maximum","sequence","length","."],"function":"def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert 
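The whole-word candidate grouping and the 80/10/10 replacement policy in `create_masked_lm_predictions` can be demonstrated in a few lines of pure Python; token and vocabulary values below are illustrative:

```python
import random
rng = random.Random(0)

tokens = ["[CLS]", "un", "##break", "##able", "dog", "[SEP]"]
vocab_words = ["the", "cat", "ran"]
do_whole_word_mask = True

cand_indexes = []
for i, tok in enumerate(tokens):
    if tok in ("[CLS]", "[SEP]"):
        continue
    # With whole-word masking, ## continuation pieces join the previous word.
    if do_whole_word_mask and cand_indexes and tok.startswith("##"):
        cand_indexes[-1].append(i)
    else:
        cand_indexes.append([i])
# cand_indexes == [[1, 2, 3], [4]] -> "unbreakable" is a single candidate

output_tokens = list(tokens)
for index in cand_indexes[0]:            # mask the first candidate
    if rng.random() < 0.8:
        masked = "[MASK]"                # 80%: replace with [MASK]
    elif rng.random() < 0.5:
        masked = tokens[index]           # 10%: keep the original token
    else:
        masked = rng.choice(vocab_words) # 10%: replace with a random word
    output_tokens[index] = masked
```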
len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()","function_tokens":["def","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")",":","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_num_tokens",":","break","trunc_tokens","=","tokens_a","if","len","(","tokens_a",")",">","len","(","tokens_b",")","else","tokens_b","assert","len","(","trunc_tokens",")",">=","1","# We want to sometimes truncate from the front and sometimes from the","# back to add more randomness and avoid biases.","if","rng",".","random","(",")","<","0.5",":","del","trunc_tokens","[","0","]","else",":","trunc_tokens",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/create_pretraining_data.py#L418-L433"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_ner.py","language":"python","identifier":"InputExample.__init__","parameters":"(self, guid, text, label=None)","argument_list":"","return_statement":"","docstring":"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.","docstring_summary":"Constructs a InputExample.","docstring_tokens":["Constructs","a","InputExample","."],"function":"def __init__(self, guid, text, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n label: (Optional) string. The label of the example. 
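`truncate_seq_pair` trims the longer of the two sequences one token at a time, removing from the front or the back at random to avoid positional bias. A self-contained sketch (the wrapper name is hypothetical):

```python
import random
rng = random.Random(0)

def truncate_pair(tokens_a, tokens_b, max_num_tokens):
    # Trim the longer side until the pair fits within max_num_tokens.
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        trunc = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc) >= 1
        if rng.random() < 0.5:
            del trunc[0]     # drop from the front...
        else:
            trunc.pop()      # ...or from the back

a, b = list("abcdefg"), list("hij")
truncate_pair(a, b, 6)
assert len(a) + len(b) == 6
```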
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.label = label","function_tokens":["def","__init__","(","self",",","guid",",","text",",","label","=","None",")",":","self",".","guid","=","guid","self",".","text","=","text","self",".","label","=","label"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_ner.py#L129-L141"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_ner.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_ner.py#L158-L160"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_ner.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_ner.py#L162-L164"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_ner.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_ner.py#L166-L168"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_ner.py","language":"python","identifier":"DataProcessor._read_data","parameters":"(cls, input_file)","argument_list":"","return_statement":"","docstring":"Reads a BIO data.","docstring_summary":"Reads a BIO data.","docstring_tokens":["Reads","a","BIO","data","."],"function":"def 
_read_data(cls, input_file):\n \"\"\"Reads a BIO data.\"\"\"\n with open(input_file) as f:\n lines = []\n words = []\n labels = []\n for line in f:\n contends = line.strip()\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n if contends.startswith(\"-DOCSTART-\"):\n words.append('')\n continue\n if len(contends) == 0 and words[-1] == '.':\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append([l, w])\n words = []\n labels = []\n continue\n if len(contends) == 0:\n continue\n words.append(word)\n labels.append(label)\n return lines","function_tokens":["def","_read_data","(","cls",",","input_file",")",":","with","open","(","input_file",")","as","f",":","lines","=","[","]","words","=","[","]","labels","=","[","]","for","line","in","f",":","contends","=","line",".","strip","(",")","word","=","line",".","strip","(",")",".","split","(","' '",")","[","0","]","label","=","line",".","strip","(",")",".","split","(","' '",")","[","-","1","]","if","contends",".","startswith","(","\"-DOCSTART-\"",")",":","words",".","append","(","''",")","continue","if","len","(","contends",")","==","0","and","words","[","-","1","]","==","'.'",":","l","=","' '",".","join","(","[","label","for","label","in","labels","if","len","(","label",")",">","0","]",")","w","=","' '",".","join","(","[","word","for","word","in","words","if","len","(","word",")",">","0","]",")","lines",".","append","(","[","l",",","w","]",")","words","=","[","]","labels","=","[","]","continue","if","len","(","contends",")","==","0",":","continue","words",".","append","(","word",")","labels",".","append","(","label",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_ner.py#L171-L195"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/extract_features.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
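`DataProcessor._read_data` turns CoNLL-style "word label" lines into (label-string, word-string) pairs. Below is a simplified sketch on a tiny inline sample; the source additionally requires the previous word to be "." before flushing a sentence, a check dropped here for brevity:

```python
sample = "EU B-ORG\nrejects O\nGerman B-MISC\n. O\n\n"

lines, words, labels = [], [], []
for line in sample.splitlines():
    contends = line.strip()
    if contends.startswith("-DOCSTART-"):
        continue                          # skip document markers
    if not contends:
        if words:                         # sentence boundary: flush
            lines.append([" ".join(labels), " ".join(words)])
            words, labels = [], []
        continue
    words.append(contends.split(" ")[0])
    labels.append(contends.split(" ")[-1])

# lines == [["B-ORG O B-MISC O", "EU rejects German ."]]
```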
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(\n all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",")",":","all_unique_ids","=","[","]","all_input_ids","=","[","]","all_input_mask","=","[","]","all_input_type_ids","=","[","]","for","feature","in","features",":","all_unique_ids",".","append","(","feature",".","unique_id",")","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_input_type_ids",".","append","(","feature",".","input_type_ids",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"unique_ids\"",":","tf",".","constant","(","all_unique_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_type_ids\"",":","tf",".","constant","(","all_input_type_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","}",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","False",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/extract_features.py#L100-L145"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/extract_features.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(\n 
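The in-memory `tf.data` construction in `input_fn_builder` generalizes directly; here is a minimal runnable sketch with toy shapes (TensorFlow assumed installed; `tf.data` behaves the same outside the TPUEstimator context):

```python
import tensorflow as tf

num_examples, seq_length, batch_size = 4, 8, 2
d = tf.data.Dataset.from_tensor_slices({
    "unique_ids": tf.constant([0, 1, 2, 3], dtype=tf.int32),
    "input_ids": tf.zeros([num_examples, seq_length], dtype=tf.int32),
    "input_mask": tf.ones([num_examples, seq_length], dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
# Each element is now a dict of [batch_size, ...] tensors.
```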
config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map,\n initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\n \"unique_id\": unique_ids,\n }\n\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","layer_indexes",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","unique_ids","=","features","[","\"unique_ids\"","]","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","input_type_ids","=","features","[","\"input_type_ids\"","]","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","False",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","input_type_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","if","mode","!=","tf",".","estimator",".","ModeKeys",".","PREDICT",":","raise","ValueError","(","\"Only PREDICT modes are supported: %s\"","%","(","mode",")",")","tvars","=","tf",".","trainable_variables","(",")","scaffold_fn","=","None","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","all_layers","=","model",".","get_all_encoder_layers","(",")","predictions","=","{","\"unique_id\"",":","unique_ids",",","}","for","(","i",",","layer_index",")","in","enumerate","(","layer_indexes",")",":","predictions","[","\"layer_output_%d\"","%","i","]","=","all_layers","[","layer_index","]","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","predictions",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/extract_features.py#L148-L207"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/extract_features.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, seq_length, tokenizer)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(\n InputFeatures(\n unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","input_type_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","input_type_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","input_type_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","input_type_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","input_type_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","seq_length","assert","len","(","input_mask",")","==","seq_length","assert","len","(","input_type_ids",")","==","seq_length","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"unique_id: %s\"","%","(","example",".","unique_id",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"input_type_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_type_ids","]",")",")","features",".","append","(","InputFeatures","(","unique_id","=","example",".","unique_id",",","tokens","=","tokens",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","input_type_ids","=","input_type_ids",")",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/extract_features.py#L210-L299"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/extract_features.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. 
This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/extract_features.py#L302-L316"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/extract_features.py","language":"python","identifier":"read_examples","parameters":"(input_file)","argument_list":"","return_statement":"return examples","docstring":"Read a list of `InputExample`s from an input file.","docstring_summary":"Read a list of `InputExample`s from an input file.","docstring_tokens":["Read","a","list","of","InputExample","s","from","an","input","file","."],"function":"def read_examples(input_file):\n \"\"\"Read a list of `InputExample`s from an input file.\"\"\"\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(\n InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))\n unique_id += 1\n return examples","function_tokens":["def","read_examples","(","input_file",")",":","examples","=","[","]","unique_id","=","0","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","line",":","break","line","=","line",".","strip","(",")","text_a","=","None","text_b","=","None","m","=","re",".","match","(","r\"^(.*) \\|\\|\\| (.*)$\"",",","line",")","if","m","is","None",":","text_a","=","line","else",":","text_a","=","m",".","group","(","1",")","text_b","=","m",".","group","(","2",")","examples",".","append","(","InputExample","(","unique_id","=","unique_id",",","text_a","=","text_a",",","text_b","=","text_b",")",")","unique_id","+=","1","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/extract_features.py#L319-L340"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier_with_tfhub.py","language":"python","identifier":"create_model","parameters":"(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(is_training, input_ids, input_mask, segment_ids, labels,\n num_labels, bert_hub_module_handle):\n \"\"\"Creates a classification model.\"\"\"\n tags = set()\n if is_training:\n tags.add(\"train\")\n bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True)\n bert_inputs = dict(\n input_ids=input_ids,\n 
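Editor's note: the heuristic in `_truncate_seq_pair` above can be exercised in isolation. A hedged standalone sketch (the demo token lists are illustrative):

def truncate_seq_pair(tokens_a, tokens_b, max_length):
    # Pop one token at a time from whichever sequence is currently longer,
    # so a very short sequence keeps all of its (more informative) tokens.
    while len(tokens_a) + len(tokens_b) > max_length:
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()

a, b = list("abcdefgh"), list("xy")
truncate_seq_pair(a, b, 6)
assert a == list("abcd") and b == list("xy")  # only the longer side shrank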
input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use\n # bert_outputs[\"sequence_output\"] instead.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)","function_tokens":["def","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","bert_hub_module_handle",")",":","tags","=","set","(",")","if","is_training",":","tags",".","add","(","\"train\"",")","bert_module","=","hub",".","Module","(","bert_hub_module_handle",",","tags","=","tags",",","trainable","=","True",")","bert_inputs","=","dict","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",")","bert_outputs","=","bert_module","(","inputs","=","bert_inputs",",","signature","=","\"tokens\"",",","as_dict","=","True",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use","# bert_outputs[\"sequence_output\"] instead.","output_layer","=","bert_outputs","[","\"pooled_output\"","]","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","if","is_training",":","# I.e., 0.1 
dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier_with_tfhub.py#L37-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier_with_tfhub.py","language":"python","identifier":"model_fn_builder","parameters":"(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu, bert_hub_module_handle):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,\n bert_hub_module_handle)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions={\"probabilities\": probabilities})\n else:\n raise ValueError(\n \"Only TRAIN, EVAL and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return 
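Editor's note: numerically, the head in `create_model` above is one affine layer followed by softmax cross-entropy (plus dropout with keep_prob=0.9 at train time). A NumPy stand-in for those TF ops, as a sketch only; shapes and names are illustrative:

import numpy as np

def classifier_head(pooled, output_weights, output_bias, labels, num_labels):
    logits = pooled @ output_weights.T + output_bias        # [batch, num_labels]
    logits = logits - logits.max(axis=-1, keepdims=True)    # numerical stability
    log_probs = logits - np.log(np.exp(logits).sum(-1, keepdims=True))
    one_hot = np.eye(num_labels)[labels]                    # [batch, num_labels]
    per_example_loss = -(one_hot * log_probs).sum(-1)
    return per_example_loss.mean(), np.exp(log_probs)      # (loss, probabilities)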
model_fn","function_tokens":["def","model_fn_builder","(","num_labels",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","bert_hub_module_handle",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","bert_hub_module_handle",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","label_ids",",","predictions",")","loss","=","tf",".","metrics",".","mean","(","per_example_loss",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","PREDICT",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",")","else",":","raise","ValueError","(","\"Only TRAIN, EVAL and PREDICT modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier_with_tfhub.py#L87-L143"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/bert\/run_classifier_with_tfhub.py","language":"python","identifier":"create_tokenizer_from_hub_module","parameters":"(bert_hub_module_handle)","argument_list":"","return_statement":"return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","docstring":"Get the vocab file and casing info from the Hub module.","docstring_summary":"Get the vocab file and casing info from the Hub module.","docstring_tokens":["Get","the","vocab","file","and","casing","info","from","the","Hub","module","."],"function":"def 
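Editor's note: the eval branch's `metric_fn` above reduces to an argmax accuracy plus a mean loss. A hedged NumPy equivalent of what `tf.metrics.accuracy` / `tf.metrics.mean` compute over one batch:

import numpy as np

def eval_metrics(per_example_loss, label_ids, logits):
    predictions = np.argmax(logits, axis=-1)
    return {
        "eval_accuracy": float((predictions == label_ids).mean()),
        "eval_loss": float(np.mean(per_example_loss)),
    }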
create_tokenizer_from_hub_module(bert_hub_module_handle):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_hub_module_handle)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n return tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)","function_tokens":["def","create_tokenizer_from_hub_module","(","bert_hub_module_handle",")",":","with","tf",".","Graph","(",")",".","as_default","(",")",":","bert_module","=","hub",".","Module","(","bert_hub_module_handle",")","tokenization_info","=","bert_module","(","signature","=","\"tokenization_info\"",",","as_dict","=","True",")","with","tf",".","Session","(",")","as","sess",":","vocab_file",",","do_lower_case","=","sess",".","run","(","[","tokenization_info","[","\"vocab_file\"","]",",","tokenization_info","[","\"do_lower_case\"","]","]",")","return","tokenization",".","FullTokenizer","(","vocab_file","=","vocab_file",",","do_lower_case","=","do_lower_case",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/bert\/run_classifier_with_tfhub.py#L146-L155"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_pretraining.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),model.get_embedding_table_2(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = 
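Editor's note: usage of `create_tokenizer_from_hub_module` is straightforward. A hedged example; it requires TF 1.x plus tensorflow_hub, and the handle below is one plausible public BERT module, not something this repo mandates:

tokenizer = create_tokenizer_from_hub_module(
    "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1")
print(tokenizer.tokenize("Jacksonville is in Florida."))
# e.g. ['jack', '##son', '##ville', 'is', 'in', 'florida', '.']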
tf.trainable_variables()\n\n initialized_variable_names = {}\n print(\"init_checkpoint:\",init_checkpoint)\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,[-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n # next_sentence_example_loss=0.0 TODO\n # next_sentence_log_probs=0.0 # TODO\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for 
TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","masked_lm_positions","=","features","[","\"masked_lm_positions\"","]","masked_lm_ids","=","features","[","\"masked_lm_ids\"","]","masked_lm_weights","=","features","[","\"masked_lm_weights\"","]","next_sentence_labels","=","features","[","\"next_sentence_labels\"","]","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","(","masked_lm_loss",",","masked_lm_example_loss",",","masked_lm_log_probs",")","=","get_masked_lm_output","(","bert_config",",","model",".","get_sequence_output","(",")",",","model",".","get_embedding_table","(",")",",","model",".","get_embedding_table_2","(",")",",","masked_lm_positions",",","masked_lm_ids",",","masked_lm_weights",")","(","next_sentence_loss",",","next_sentence_example_loss",",","next_sentence_log_probs",")","=","get_next_sentence_output","(","bert_config",",","model",".","get_pooled_output","(",")",",","next_sentence_labels",")","total_loss","=","masked_lm_loss","+","next_sentence_loss","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","print","(","\"init_checkpoint:\"",",","init_checkpoint",")","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = %s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels",")",":","\"\"\"Computes the loss and accuracy of the 
model.\"\"\"","masked_lm_log_probs","=","tf",".","reshape","(","masked_lm_log_probs",",","[","-","1",",","masked_lm_log_probs",".","shape","[","-","1","]","]",")","masked_lm_predictions","=","tf",".","argmax","(","masked_lm_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","masked_lm_example_loss","=","tf",".","reshape","(","masked_lm_example_loss",",","[","-","1","]",")","masked_lm_ids","=","tf",".","reshape","(","masked_lm_ids",",","[","-","1","]",")","masked_lm_weights","=","tf",".","reshape","(","masked_lm_weights",",","[","-","1","]",")","masked_lm_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","masked_lm_ids",",","predictions","=","masked_lm_predictions",",","weights","=","masked_lm_weights",")","masked_lm_mean_loss","=","tf",".","metrics",".","mean","(","values","=","masked_lm_example_loss",",","weights","=","masked_lm_weights",")","next_sentence_log_probs","=","tf",".","reshape","(","next_sentence_log_probs",",","[","-","1",",","next_sentence_log_probs",".","shape","[","-","1","]","]",")","next_sentence_predictions","=","tf",".","argmax","(","next_sentence_log_probs",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","next_sentence_labels","=","tf",".","reshape","(","next_sentence_labels",",","[","-","1","]",")","next_sentence_accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","next_sentence_labels",",","predictions","=","next_sentence_predictions",")","next_sentence_mean_loss","=","tf",".","metrics",".","mean","(","values","=","next_sentence_example_loss",")","return","{","\"masked_lm_accuracy\"",":","masked_lm_accuracy",",","\"masked_lm_loss\"",":","masked_lm_mean_loss",",","\"next_sentence_accuracy\"",":","next_sentence_accuracy",",","\"next_sentence_loss\"",":","next_sentence_mean_loss",",","}","# next_sentence_example_loss=0.0 TODO","# next_sentence_log_probs=0.0 # TODO","eval_metrics","=","(","metric_fn",",","[","masked_lm_example_loss",",","masked_lm_log_probs",",","masked_lm_ids",",","masked_lm_weights",",","next_sentence_example_loss",",","next_sentence_log_probs",",","next_sentence_labels","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","raise","ValueError","(","\"Only TRAIN and EVAL modes are supported: %s\"","%","(","mode",")",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_pretraining.py#L109-L238"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_pretraining.py","language":"python","identifier":"get_masked_lm_output","parameters":"(bert_config, input_tensor, output_weights,project_weights, positions,\n label_ids, label_weights)","argument_list":"","return_statement":"return (loss, per_example_loss, log_probs)","docstring":"Get loss and log probs for the masked LM.","docstring_summary":"Get loss and log probs for the masked LM.","docstring_tokens":["Get","loss","and","log","probs","for","the","masked","LM","."],"function":"def get_masked_lm_output(bert_config, input_tensor, output_weights,project_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls\/predictions\"):\n # We apply one more non-linear 
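Editor's note: the masked-LM half of the pretraining `metric_fn` above is a weighted accuracy, because padded prediction slots carry weight 0.0 and must not count. A hedged NumPy sketch of that reduction (the 1e-5 smoothing mirrors the loss denominator below, not tf.metrics itself):

import numpy as np

def masked_lm_accuracy(masked_lm_log_probs, masked_lm_ids, masked_lm_weights):
    log_probs = masked_lm_log_probs.reshape(-1, masked_lm_log_probs.shape[-1])
    predictions = log_probs.argmax(axis=-1)
    ids = masked_lm_ids.reshape(-1)
    weights = masked_lm_weights.reshape(-1)  # 1.0 = real prediction, 0.0 = padding
    return float((weights * (predictions == ids)).sum() / (weights.sum() + 1e-5))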
transformation before the output layer.\n # This matrix is not used after pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n # logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n # input_tensor=[-1,hidden_size], project_weights=[embedding_size, hidden_size], project_weights_transpose=[hidden_size, embedding_size]--->[-1, embedding_size]\n input_project = tf.matmul(input_tensor, project_weights, transpose_b=True)\n logits = tf.matmul(input_project, output_weights, transpose_b=True)\n # # input_project=[-1, embedding_size], output_weights=[vocab_size, embedding_size], output_weights_transpose=[embedding_size, vocab_size] ---> [-1, vocab_size]\n\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator \/ denominator\n\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_masked_lm_output","(","bert_config",",","input_tensor",",","output_weights",",","project_weights",",","positions",",","label_ids",",","label_weights",")",":","input_tensor","=","gather_indexes","(","input_tensor",",","positions",")","with","tf",".","variable_scope","(","\"cls\/predictions\"",")",":","# We apply one more non-linear transformation before the output layer.","# This matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"transform\"",")",":","input_tensor","=","tf",".","layers",".","dense","(","input_tensor",",","units","=","bert_config",".","hidden_size",",","activation","=","modeling",".","get_activation","(","bert_config",".","hidden_act",")",",","kernel_initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","input_tensor","=","modeling",".","layer_norm","(","input_tensor",")","# The output weights are the same as the input embeddings, but there is","# an output-only bias for each token.","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","bert_config",".","vocab_size","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","# logits = tf.matmul(input_tensor, output_weights, transpose_b=True)","# input_tensor=[-1,hidden_size], project_weights=[embedding_size, hidden_size], project_weights_transpose=[hidden_size, embedding_size]--->[-1, 
embedding_size]","input_project","=","tf",".","matmul","(","input_tensor",",","project_weights",",","transpose_b","=","True",")","logits","=","tf",".","matmul","(","input_project",",","output_weights",",","transpose_b","=","True",")","# # input_project=[-1, embedding_size], output_weights=[vocab_size, embedding_size], output_weights_transpose=[embedding_size, vocab_size] ---> [-1, vocab_size]","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","label_ids","=","tf",".","reshape","(","label_ids",",","[","-","1","]",")","label_weights","=","tf",".","reshape","(","label_weights",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","label_ids",",","depth","=","bert_config",".","vocab_size",",","dtype","=","tf",".","float32",")","# The `positions` tensor might be zero-padded (if the sequence is too","# short to have the maximum number of predictions). The `label_weights`","# tensor has a value of 1.0 for every real prediction and 0.0 for the","# padding predictions.","per_example_loss","=","-","tf",".","reduce_sum","(","log_probs","*","one_hot_labels",",","axis","=","[","-","1","]",")","numerator","=","tf",".","reduce_sum","(","label_weights","*","per_example_loss",")","denominator","=","tf",".","reduce_sum","(","label_weights",")","+","1e-5","loss","=","numerator","\/","denominator","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_pretraining.py#L241-L287"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_pretraining.py","language":"python","identifier":"get_next_sentence_output","parameters":"(bert_config, input_tensor, labels)","argument_list":"","return_statement":"","docstring":"Get loss and log probs for the next sentence prediction.","docstring_summary":"Get loss and log probs for the next sentence prediction.","docstring_tokens":["Get","loss","and","log","probs","for","the","next","sentence","prediction","."],"function":"def get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls\/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)","function_tokens":["def","get_next_sentence_output","(","bert_config",",","input_tensor",",","labels",")",":","# Simple binary classification. Note that 0 is \"next sentence\" and 1 is","# \"random sentence\". 
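Editor's note: the two matmuls replacing the commented-out single one implement ALBERT's factorized output path, hidden -> embedding -> vocab, which avoids materializing a [hidden_size, vocab_size] matrix. A shape-checked NumPy sketch; the sizes are illustrative (21128 matches the Chinese BERT vocab, but that is an assumption here):

import numpy as np

hidden_size, embedding_size, vocab_size = 768, 128, 21128
x = np.random.randn(5, hidden_size)                  # gathered masked positions
project_weights = np.random.randn(embedding_size, hidden_size)
output_weights = np.random.randn(vocab_size, embedding_size)

input_project = x @ project_weights.T                # [5, embedding_size]
logits = input_project @ output_weights.T            # [5, vocab_size]
assert logits.shape == (5, vocab_size)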
This weight matrix is not used after pre-training.","with","tf",".","variable_scope","(","\"cls\/seq_relationship\"",")",":","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","shape","=","[","2",",","bert_config",".","hidden_size","]",",","initializer","=","modeling",".","create_initializer","(","bert_config",".","initializer_range",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","shape","=","[","2","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","logits","=","tf",".","matmul","(","input_tensor",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","labels","=","tf",".","reshape","(","labels",",","[","-","1","]",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","2",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","log_probs",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_pretraining.py#L290-L310"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_pretraining.py","language":"python","identifier":"gather_indexes","parameters":"(sequence_tensor, positions)","argument_list":"","return_statement":"return output_tensor","docstring":"Gathers the vectors at the specific positions over a minibatch.","docstring_summary":"Gathers the vectors at the specific positions over a minibatch.","docstring_tokens":["Gathers","the","vectors","at","the","specific","positions","over","a","minibatch","."],"function":"def gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return 
output_tensor","function_tokens":["def","gather_indexes","(","sequence_tensor",",","positions",")",":","sequence_shape","=","modeling",".","get_shape_list","(","sequence_tensor",",","expected_rank","=","3",")","batch_size","=","sequence_shape","[","0","]","seq_length","=","sequence_shape","[","1","]","width","=","sequence_shape","[","2","]","flat_offsets","=","tf",".","reshape","(","tf",".","range","(","0",",","batch_size",",","dtype","=","tf",".","int32",")","*","seq_length",",","[","-","1",",","1","]",")","flat_positions","=","tf",".","reshape","(","positions","+","flat_offsets",",","[","-","1","]",")","flat_sequence_tensor","=","tf",".","reshape","(","sequence_tensor",",","[","batch_size","*","seq_length",",","width","]",")","output_tensor","=","tf",".","gather","(","flat_sequence_tensor",",","flat_positions",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_pretraining.py#L313-L326"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_pretraining.py","language":"python","identifier":"input_fn_builder","parameters":"(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
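Editor's note: the flat-offset gather in `gather_indexes` above has a direct NumPy analogue, which makes the index arithmetic easy to verify (`gather_indexes_np` is a hypothetical helper, not in the repo):

import numpy as np

def gather_indexes_np(sequence_tensor, positions):
    batch_size, seq_length, width = sequence_tensor.shape
    # Offset each row's positions so they index into the flattened batch.
    flat_offsets = (np.arange(batch_size) * seq_length)[:, None]   # [batch, 1]
    flat_positions = (positions + flat_offsets).reshape(-1)
    flat_sequence = sequence_tensor.reshape(batch_size * seq_length, width)
    return flat_sequence[flat_positions]

seq = np.arange(2 * 4 * 3).reshape(2, 4, 3)
out = gather_indexes_np(seq, np.array([[0, 2], [1, 3]]))
assert (out[2] == seq[1, 1]).all()   # output row 2 = position 1 of batch item 1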
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we won't cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","input_files",",","max_seq_length",",","max_predictions_per_seq",",","is_training",",","num_cpu_threads","=","4",")",":","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","max_seq_length","]",",","tf",".","int64",")",",","\"masked_lm_positions\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_ids\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","int64",")",",","\"masked_lm_weights\"",":","tf",".","FixedLenFeature","(","[","max_predictions_per_seq","]",",","tf",".","float32",")",",","\"next_sentence_labels\"",":","tf",".","FixedLenFeature","(","[","1","]",",","tf",".","int64",")",",","}","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","if","is_training",":","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","tf",".","constant","(","input_files",")",")","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","len","(","input_files",")",")","# `cycle_length` is the number of parallel files that get read.","cycle_length","=","min","(","num_cpu_threads",",","len","(","input_files",")",")","# `sloppy` mode means that the interleaving is not exact. This adds","# even more randomness to the training pipeline.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","parallel_interleave","(","tf",".","data",".","TFRecordDataset",",","sloppy","=","is_training",",","cycle_length","=","cycle_length",")",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","else",":","d","=","tf",".","data",".","TFRecordDataset","(","input_files",")","# Since we evaluate for a fixed number of steps we don't want to encounter","# out-of-range exceptions.","d","=","d",".","repeat","(",")","# We must `drop_remainder` on training because the TPU requires fixed","# size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU","# and we *don't* want to drop the remainder, otherwise we won't cover","# every sample.","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","num_parallel_batches","=","num_cpu_threads",",","drop_remainder","=","True",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_pretraining.py#L329-L393"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_pretraining.py","language":"python","identifier":"_decode_record","parameters":"(record, name_to_features)","argument_list":"","return_statement":"return example","docstring":"Decodes a record to a TensorFlow example.","docstring_summary":"Decodes a record to a TensorFlow example.","docstring_tokens":["Decodes","a","record","to","a","TensorFlow","example","."],"function":"def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example","function_tokens":["def","_decode_record","(","record",",","name_to_features",")",":","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_pretraining.py#L396-L408"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"validate_case_matches_checkpoint","parameters":"(do_lower_case, init_checkpoint)","argument_list":"","return_statement":"","docstring":"Checks whether the casing config is consistent with the checkpoint name.","docstring_summary":"Checks whether the casing config is consistent with the checkpoint name.","docstring_tokens":["Checks","whether","the","casing","config","is","consistent","with","the","checkpoint","name","."],"function":"def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. 
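Editor's note: the `drop_remainder=True` behavior in `input_fn_builder` above is worth making concrete, since it explains why eval also calls repeat(). A framework-free sketch of the semantics (not the tf.data implementation):

def batch_examples(examples, batch_size, drop_remainder):
    # TPUs need every batch to have the same static shape, so a short
    # final batch is discarded when drop_remainder is set.
    batches = [examples[i:i + batch_size]
               for i in range(0, len(examples), batch_size)]
    if drop_remainder and batches and len(batches[-1]) < batch_size:
        batches.pop()
    return batches

assert batch_examples(list(range(10)), 4, drop_remainder=True) == \
    [[0, 1, 2, 3], [4, 5, 6, 7]]   # the [8, 9] remainder is dropped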
The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-trained. If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))","function_tokens":["def","validate_case_matches_checkpoint","(","do_lower_case",",","init_checkpoint",")",":","# The casing has to be passed in by the user and there is no explicit check","# as to whether it matches the checkpoint. The casing information probably","# should have been stored in the bert_config.json file, but it's not, so","# we have to heuristically detect it to validate.","if","not","init_checkpoint",":","return","m","=","re",".","match","(","\"^.*?([A-Za-z0-9_-]+)\/bert_model.ckpt\"",",","init_checkpoint",")","if","m","is","None",":","return","model_name","=","m",".","group","(","1",")","lower_models","=","[","\"uncased_L-24_H-1024_A-16\"",",","\"uncased_L-12_H-768_A-12\"",",","\"multilingual_L-12_H-768_A-12\"",",","\"chinese_L-12_H-768_A-12\"","]","cased_models","=","[","\"cased_L-12_H-768_A-12\"",",","\"cased_L-24_H-1024_A-16\"",",","\"multi_cased_L-12_H-768_A-12\"","]","is_bad_config","=","False","if","model_name","in","lower_models","and","not","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"False\"","case_name","=","\"lowercased\"","opposite_flag","=","\"True\"","if","model_name","in","cased_models","and","do_lower_case",":","is_bad_config","=","True","actual_flag","=","\"True\"","case_name","=","\"cased\"","opposite_flag","=","\"False\"","if","is_bad_config",":","raise","ValueError","(","\"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"","\"However, `%s` seems to be a %s model, so you \"","\"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"","\"how the model was pre-trained. 
If this error is wrong, please \"","\"just comment out this check.\"","%","(","actual_flag",",","init_checkpoint",",","model_name",",","case_name",",","opposite_flag",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L28-L75"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L78-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n 
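Editor's note: the casing heuristic in `validate_case_matches_checkpoint` above hinges on the non-greedy regex extracting the checkpoint's parent directory name. A small self-contained check (the path is illustrative):

import re

path = "some/dir/chinese_L-12_H-768_A-12/bert_model.ckpt"
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", path)
assert m and m.group(1) == "chinese_L-12_H-768_A-12"
# "chinese_L-12_H-768_A-12" is in `lower_models`, so passing
# --do_lower_case=False with this checkpoint would raise the ValueError above.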
raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L98-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","index","=","0","with","tf",".","gfile",".","GFile","(","vocab_file",",","\"r\"",")","as","reader",":","while","True",":","token","=","convert_to_unicode","(","reader",".","readline","(",")",")","if","not","token",":","break","token","=","token",".","strip","(",")","vocab","[","token","]","=","index","index","+=","1","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L121-L133"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"convert_by_vocab","parameters":"(vocab, items)","argument_list":"","return_statement":"return output","docstring":"Converts a sequence of [tokens|ids] using the vocab.","docstring_summary":"Converts a sequence of [tokens|ids] using the vocab.","docstring_tokens":["Converts","a","sequence","of","[","tokens|ids","]","using","the","vocab","."],"function":"def convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n #print(\"items:\",items) #['[CLS]', '\u65e5', '##\u671f', '\uff0c', '\u4f46', '\u88ab', '##\u544a', '\u91d1', '##\u4e1c', '##\u798f', '\u8f7d', '##\u660e', '[MASK]', 'U', '##N', '##K', ']', '\u4fdd', '##\u8bc1', '\u672c', '##\u6708', '1', '##4', '[MASK]', '\u5230', '##\u4f4d', '\uff0c', '2', '##0', '##1', 
'##5', '\u5e74', '6', '[MASK]', '1', '##1', '\u65e5', '[', 'U', '##N', '##K', ']', '\uff0c', '\u539f', '##\u544a', '[MASK]', '\u8ba4', '##\u53ef', '\u4e8e', '2', '##0', '##1', '##5', '[MASK]', '6', '\u6708', '[MASK]', '[MASK]', '\u65e5', '##\u5411', '\u88ab', '##\u544a', '\u4e3b', '##\u5f20', '\u6743', '##\u5229', '\u3002', '\u800c', '[MASK]', '[MASK]', '\u81ea', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '\u5e74', '6', '\u6708', '1', '##1', '\u65e5', '[SEP]', '\u539f', '##\u544a', '\u4e8e', '2', '##0', '##1', '##6', '[MASK]', '6', '[MASK]', '2', '##4', '\u65e5', '\u8d77', '##\u8bc9', '\uff0c', '\u4e3b', '##\u5f20', '\u4fdd', '##\u8bc1', '\u8d23', '##\u4efb', '\uff0c', '\u5df2', '\u8d85', '##\u8fc7', '\u4fdd', '##\u8bc1', '\u671f', '##\u9650', '[MASK]', '\u4fdd', '##\u8bc1', '\u4eba', '\u4f9d', '##\u6cd5', '\u4e0d', '##\u518d', '\u627f', '##\u62c5', '\u4fdd', '##\u8bc1', '[MASK]', '[MASK]', '[MASK]', '[SEP]']\n for i,item in enumerate(items):\n #print(i,\"item:\",item) # ##\u671f\n output.append(vocab[item])\n return output","function_tokens":["def","convert_by_vocab","(","vocab",",","items",")",":","output","=","[","]","#print(\"items:\",items) #['[CLS]', '\u65e5', '##\u671f', '\uff0c', '\u4f46', '\u88ab', '##\u544a', '\u91d1', '##\u4e1c', '##\u798f', '\u8f7d', '##\u660e', '[MASK]', 'U', '##N', '##K', ']', '\u4fdd', '##\u8bc1', '\u672c', '##\u6708', '1', '##4', '[MASK]', '\u5230', '##\u4f4d', '\uff0c', '2', '##0', '##1', '##5', '\u5e74', '6', '[MASK]', '1', '##1', '\u65e5', '[', 'U', '##N', '##K', ']', '\uff0c', '\u539f', '##\u544a', '[MASK]', '\u8ba4', '##\u53ef', '\u4e8e', '2', '##0', '##1', '##5', '[MASK]', '6', '\u6708', '[MASK]', '[MASK]', '\u65e5', '##\u5411', '\u88ab', '##\u544a', '\u4e3b', '##\u5f20', '\u6743', '##\u5229', '\u3002', '\u800c', '[MASK]', '[MASK]', '\u81ea', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '\u5e74', '6', '\u6708', '1', '##1', '\u65e5', '[SEP]', '\u539f', '##\u544a', '\u4e8e', '2', '##0', '##1', '##6', '[MASK]', '6', '[MASK]', '2', '##4', '\u65e5', '\u8d77', '##\u8bc9', '\uff0c', '\u4e3b', '##\u5f20', '\u4fdd', '##\u8bc1', '\u8d23', '##\u4efb', '\uff0c', '\u5df2', '\u8d85', '##\u8fc7', '\u4fdd', '##\u8bc1', '\u671f', '##\u9650', '[MASK]', '\u4fdd', '##\u8bc1', '\u4eba', '\u4f9d', '##\u6cd5', '\u4e0d', '##\u518d', '\u627f', '##\u62c5', '\u4fdd', '##\u8bc1', '[MASK]', '[MASK]', '[MASK]', '[SEP]']","for","i",",","item","in","enumerate","(","items",")",":","#print(i,\"item:\",item) # ##\u671f","output",".","append","(","vocab","[","item","]",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L136-L143"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","piece","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return 
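Editor's note: `load_vocab` maps each line of the vocab file to its 0-based line index, and `convert_by_vocab` simply looks items up (raising KeyError for anything out-of-vocabulary, as the helper assumes every item is present). A toy end-to-end sketch; the vocab contents are illustrative:

import collections

vocab = collections.OrderedDict(
    (token, index) for index, token in
    enumerate(["[PAD]", "[UNK]", "[CLS]", "[SEP]", "日", "##期"]))

def convert_by_vocab(vocab, items):
    return [vocab[item] for item in items]

assert convert_by_vocab(vocab, ["[CLS]", "日", "##期", "[SEP]"]) == [2, 4, 5, 3]
inv_vocab = {v: k for k, v in vocab.items()}  # ids -> tokens works the same way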
tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L154-L160"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically contorl characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L364-L373"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat","in","(","\"Cc\"",",","\"Cf\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L376-L385"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation 
character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L388-L401"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",")",":","self",".","do_lower_case","=","do_lower_case"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L190-L196"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text.","docstring_summary":"Tokenizes a piece of text.","docstring_tokens":["Tokenizes","a","piece","of","text","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. 
This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L198-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L222-L231"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text)","argument_list":"","return_statement":"return [\"\".join(x) for x in 
output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",")",":","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L233-L251"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L253-L264"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # 
https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L266-L286"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L288-L299"} 
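The records above cover BasicTokenizer's character-class helpers and CJK handling from baselines\/models\/albert\/tokenization.py. The following is a minimal, self-contained sketch of how those helpers compose into the basic tokenization pipeline; it is an illustration, not part of the extracted dataset, and the basic_tokenize wrapper and sample sentence are invented, assuming only the standard unicodedata module:

# Illustrative sketch only; basic_tokenize is an invented wrapper, not a CLUE function.
import unicodedata

def _is_punctuation(char):
    # Non-letter/number ASCII plus anything in a Unicode "P*" category,
    # mirroring the _is_punctuation record above.
    cp = ord(char)
    if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
        return True
    return unicodedata.category(char).startswith("P")

def _is_chinese_char(cp):
    # A subset of the CJK codepoint ranges listed in the _is_chinese_char record above.
    return (0x4E00 <= cp <= 0x9FFF or 0x3400 <= cp <= 0x4DBF or
            0x20000 <= cp <= 0x2A6DF or 0xF900 <= cp <= 0xFAFF)

def basic_tokenize(text, do_lower_case=True):
    # Pad CJK characters with spaces, split on whitespace, lowercase and
    # strip accents (NFD, then drop combining marks), and finally split
    # punctuation into standalone tokens, as BasicTokenizer.tokenize does.
    text = "".join(" %s " % ch if _is_chinese_char(ord(ch)) else ch for ch in text)
    tokens = []
    for tok in text.split():
        if do_lower_case:
            tok = unicodedata.normalize("NFD", tok.lower())
            tok = "".join(c for c in tok if unicodedata.category(c) != "Mn")
        cur = ""
        for ch in tok:
            if _is_punctuation(ch):
                if cur:
                    tokens.append(cur)
                cur = ""
                tokens.append(ch)
            else:
                cur += ch
        if cur:
            tokens.append(cur)
    return tokens

print(basic_tokenize("Héllo, 世界!"))  # ['hello', ',', '世', '界', '!']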
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/tokenization.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","convert_to_unicode","(","text",")","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/tokenization.py#L310-L361"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * 
cdf","docstring":"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.","docstring_summary":"Gaussian Error Linear Unit.","docstring_tokens":["Gaussian","Error","Linear","Unit","."],"function":"def gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https:\/\/arxiv.org\/abs\/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 \/ np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","tf",".","tanh","(","(","np",".","sqrt","(","2","\/","np",".","pi",")","*","(","x","+","0.044715","*","tf",".","pow","(","x",",","3",")",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L286-L299"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"get_activation","parameters":"(activation_string)","argument_list":"","return_statement":"","docstring":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.","docstring_summary":"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.","docstring_tokens":["Maps","a","string","to","a","Python","function","e",".","g",".","relu","=",">","tf",".","nn",".","relu","."],"function":"def get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. 
If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that's not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)","function_tokens":["def","get_activation","(","activation_string",")",":","# We assume that anything that's not a string is already an activation","# function, so we just return it.","if","not","isinstance","(","activation_string",",","six",".","string_types",")",":","return","activation_string","if","not","activation_string",":","return","None","act","=","activation_string",".","lower","(",")","if","act","==","\"linear\"",":","return","None","elif","act","==","\"relu\"",":","return","tf",".","nn",".","relu","elif","act","==","\"gelu\"",":","return","gelu","elif","act","==","\"tanh\"",":","return","tf",".","tanh","else",":","raise","ValueError","(","\"Unsupported activation: %s\"","%","act",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L302-L336"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"get_assignment_map_from_checkpoint","parameters":"(tvars, init_checkpoint)","argument_list":"","return_statement":"return (assignment_map, initialized_variable_names)","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, 
initialized_variable_names)","function_tokens":["def","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","assignment_map","=","{","}","initialized_variable_names","=","{","}","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","if","name","not","in","name_to_variable",":","continue","assignment_map","[","name","]","=","name","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","return","(","assignment_map",",","initialized_variable_names",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L339-L363"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"dropout","parameters":"(input_tensor, dropout_prob)","argument_list":"","return_statement":"return output","docstring":"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.","docstring_summary":"Perform dropout.","docstring_tokens":["Perform","dropout","."],"function":"def dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
The probability of dropping out a value (NOT of\n *keeping* a dimension as in `tf.nn.dropout`).\n\n Returns:\n A version of `input_tensor` with dropout applied.\n \"\"\"\n if dropout_prob is None or dropout_prob == 0.0:\n return input_tensor\n\n output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\n return output","function_tokens":["def","dropout","(","input_tensor",",","dropout_prob",")",":","if","dropout_prob","is","None","or","dropout_prob","==","0.0",":","return","input_tensor","output","=","tf",".","nn",".","dropout","(","input_tensor",",","1.0","-","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L366-L381"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"layer_norm","parameters":"(input_tensor, name=None)","argument_list":"","return_statement":"return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","docstring":"Run layer normalization on the last dimension of the tensor.","docstring_summary":"Run layer normalization on the last dimension of the tensor.","docstring_tokens":["Run","layer","normalization","on","the","last","dimension","of","the","tensor","."],"function":"def layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","function_tokens":["def","layer_norm","(","input_tensor",",","name","=","None",")",":","return","tf",".","contrib",".","layers",".","layer_norm","(","inputs","=","input_tensor",",","begin_norm_axis","=","-","1",",","begin_params_axis","=","-","1",",","scope","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L384-L387"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"layer_norm_and_dropout","parameters":"(input_tensor, dropout_prob, name=None)","argument_list":"","return_statement":"return output_tensor","docstring":"Runs layer normalization followed by dropout.","docstring_summary":"Runs layer normalization followed by dropout.","docstring_tokens":["Runs","layer","normalization","followed","by","dropout","."],"function":"def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n \"\"\"Runs layer normalization followed by dropout.\"\"\"\n output_tensor = layer_norm(input_tensor, name)\n output_tensor = dropout(output_tensor, dropout_prob)\n return output_tensor","function_tokens":["def","layer_norm_and_dropout","(","input_tensor",",","dropout_prob",",","name","=","None",")",":","output_tensor","=","layer_norm","(","input_tensor",",","name",")","output_tensor","=","dropout","(","output_tensor",",","dropout_prob",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L390-L394"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"create_initializer","parameters":"(initializer_range=0.02)","argument_list":"","return_statement":"return 
tf.truncated_normal_initializer(stddev=initializer_range)","docstring":"Creates a `truncated_normal_initializer` with the given range.","docstring_summary":"Creates a `truncated_normal_initializer` with the given range.","docstring_tokens":["Creates","a","truncated_normal_initializer","with","the","given","range","."],"function":"def create_initializer(initializer_range=0.02):\n \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n return tf.truncated_normal_initializer(stddev=initializer_range)","function_tokens":["def","create_initializer","(","initializer_range","=","0.02",")",":","return","tf",".","truncated_normal_initializer","(","stddev","=","initializer_range",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L397-L399"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"embedding_lookup","parameters":"(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False)","argument_list":"","return_statement":"return (output, embedding_table)","docstring":"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].","docstring_summary":"Looks up words embeddings for id tensor.","docstring_tokens":["Looks","up","words","embeddings","for","id","tensor","."],"function":"def embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1]) # shape of input_ids is:[ batch_size, seq_length, 1]\n\n embedding_table = tf.get_variable( # [vocab_size, embedding_size]\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1]) # one rank. 
shape as (batch_size * sequence_length,)\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) # one_hot_input_ids=[batch_size * sequence_length,vocab_size]\n output = tf.matmul(one_hot_input_ids, embedding_table) # output=[batch_size * sequence_length,embedding_size]\n else:\n output = tf.gather(embedding_table, flat_input_ids) # [vocab_size, embedding_size]*[batch_size * sequence_length,]--->[batch_size * sequence_length,embedding_size]\n\n input_shape = get_shape_list(input_ids) # input_shape=[ batch_size, seq_length, 1]\n\n output = tf.reshape(output,input_shape[0:-1] + [input_shape[-1] * embedding_size]) # output=[batch_size,sequence_length,embedding_size]\n return (output, embedding_table)","function_tokens":["def","embedding_lookup","(","input_ids",",","vocab_size",",","embedding_size","=","128",",","initializer_range","=","0.02",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","False",")",":","# This function assumes that the input is of shape [batch_size, seq_length,","# num_inputs].","#","# If the input is a 2D tensor of shape [batch_size, seq_length], we","# reshape to [batch_size, seq_length, 1].","if","input_ids",".","shape",".","ndims","==","2",":","input_ids","=","tf",".","expand_dims","(","input_ids",",","axis","=","[","-","1","]",")","# shape of input_ids is:[ batch_size, seq_length, 1]","embedding_table","=","tf",".","get_variable","(","# [vocab_size, embedding_size]","name","=","word_embedding_name",",","shape","=","[","vocab_size",",","embedding_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","flat_input_ids","=","tf",".","reshape","(","input_ids",",","[","-","1","]",")","# one rank. shape as (batch_size * sequence_length,)","if","use_one_hot_embeddings",":","one_hot_input_ids","=","tf",".","one_hot","(","flat_input_ids",",","depth","=","vocab_size",")","# one_hot_input_ids=[batch_size * sequence_length,vocab_size]","output","=","tf",".","matmul","(","one_hot_input_ids",",","embedding_table",")","# output=[batch_size * sequence_length,embedding_size]","else",":","output","=","tf",".","gather","(","embedding_table",",","flat_input_ids",")","# [vocab_size, embedding_size]*[batch_size * sequence_length,]--->[batch_size * sequence_length,embedding_size]","input_shape","=","get_shape_list","(","input_ids",")","# input_shape=[ batch_size, seq_length, 1]","output","=","tf",".","reshape","(","output",",","input_shape","[","0",":","-","1","]","+","[","input_shape","[","-","1","]","*","embedding_size","]",")","# output=[batch_size,sequence_length,embedding_size]","return","(","output",",","embedding_table",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L402-L446"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"embedding_lookup_factorized","parameters":"(input_ids, # Factorized embedding parameterization provide by albert\n vocab_size,\n hidden_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False)","argument_list":"","return_statement":"return (output, embedding_table, project_variable)","docstring":"Looks up words embeddings for id tensor, but in a factorized style followed by albert. 
It is used to greatly reduce the number of parameters that would otherwise exist.\n See the \"Factorized embedding parameterization\" section of the paper.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].","docstring_summary":"Looks up words embeddings for id tensor, but in a factorized style followed by albert. It is used to greatly reduce the number of parameters that would otherwise exist.\n See the \"Factorized embedding parameterization\" section of the paper.","docstring_tokens":["Looks","up","words","embeddings","for","id","tensor","but","in","a","factorized","style","followed","by","albert",".","It","is","used","to","greatly","reduce","the","number","of","parameters","that","would","otherwise","exist",".","See","the","Factorized","embedding","parameterization","section","of","the","paper","."],"function":"def embedding_lookup_factorized(input_ids, # Factorized embedding parameterization provide by albert\n vocab_size,\n hidden_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor, but in a factorized style followed by albert. It is used to greatly reduce the number of parameters that would otherwise exist.\n See the \"Factorized embedding parameterization\" section of the paper.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n\n # 1.first project one-hot vectors into a lower dimensional embedding space of size E\n print(\"embedding_lookup_factorized. factorized embedding parameterization is used.\")\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1]) # shape of input_ids is:[ batch_size, seq_length, 1]\n\n embedding_table = tf.get_variable( # [vocab_size, embedding_size]\n name=word_embedding_name,\n shape=[vocab_size, embedding_size],\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1]) # one rank. shape as (batch_size * sequence_length,)\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids,depth=vocab_size) # one_hot_input_ids=[batch_size * sequence_length,vocab_size]\n output_middle = tf.matmul(one_hot_input_ids, embedding_table) # output=[batch_size * sequence_length,embedding_size]\n else:\n output_middle = tf.gather(embedding_table,flat_input_ids) # [vocab_size, embedding_size]*[batch_size * sequence_length,]--->[batch_size * sequence_length,embedding_size]\n\n # 2. 
project vector(output_middle) to the hidden space\n project_variable = tf.get_variable( # [embedding_size, hidden_size]\n name=word_embedding_name+\"_2\",\n shape=[embedding_size, hidden_size],\n initializer=create_initializer(initializer_range))\n output = tf.matmul(output_middle, project_variable) # ([batch_size * sequence_length, embedding_size] * [embedding_size, hidden_size])--->[batch_size * sequence_length, hidden_size]\n # reshape back to 3 rank\n input_shape = get_shape_list(input_ids) # input_shape=[ batch_size, seq_length, 1]\n batch_size, sequene_length, _=input_shape\n output = tf.reshape(output, (batch_size,sequene_length,hidden_size)) # output=[batch_size, sequence_length, hidden_size]\n return (output, embedding_table, project_variable)","function_tokens":["def","embedding_lookup_factorized","(","input_ids",",","# Factorized embedding parameterization provide by albert","vocab_size",",","hidden_size",",","embedding_size","=","128",",","initializer_range","=","0.02",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","False",")",":","# This function assumes that the input is of shape [batch_size, seq_length,","# num_inputs].","#","# If the input is a 2D tensor of shape [batch_size, seq_length], we","# reshape to [batch_size, seq_length, 1].","# 1.first project one-hot vectors into a lower dimensional embedding space of size E","print","(","\"embedding_lookup_factorized. factorized embedding parameterization is used.\"",")","if","input_ids",".","shape",".","ndims","==","2",":","input_ids","=","tf",".","expand_dims","(","input_ids",",","axis","=","[","-","1","]",")","# shape of input_ids is:[ batch_size, seq_length, 1]","embedding_table","=","tf",".","get_variable","(","# [vocab_size, embedding_size]","name","=","word_embedding_name",",","shape","=","[","vocab_size",",","embedding_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","flat_input_ids","=","tf",".","reshape","(","input_ids",",","[","-","1","]",")","# one rank. shape as (batch_size * sequence_length,)","if","use_one_hot_embeddings",":","one_hot_input_ids","=","tf",".","one_hot","(","flat_input_ids",",","depth","=","vocab_size",")","# one_hot_input_ids=[batch_size * sequence_length,vocab_size]","output_middle","=","tf",".","matmul","(","one_hot_input_ids",",","embedding_table",")","# output=[batch_size * sequence_length,embedding_size]","else",":","output_middle","=","tf",".","gather","(","embedding_table",",","flat_input_ids",")","# [vocab_size, embedding_size]*[batch_size * sequence_length,]--->[batch_size * sequence_length,embedding_size]","# 2. 
project vector(output_middle) to the hidden space","project_variable","=","tf",".","get_variable","(","# [embedding_size, hidden_size]","name","=","word_embedding_name","+","\"_2\"",",","shape","=","[","embedding_size",",","hidden_size","]",",","initializer","=","create_initializer","(","initializer_range",")",")","output","=","tf",".","matmul","(","output_middle",",","project_variable",")","# ([batch_size * sequence_length, embedding_size] * [embedding_size, hidden_size])--->[batch_size * sequence_length, hidden_size]","# reshape back to 3 rank","input_shape","=","get_shape_list","(","input_ids",")","# input_shape=[ batch_size, seq_length, 1]","batch_size",",","sequene_length",",","_","=","input_shape","output","=","tf",".","reshape","(","output",",","(","batch_size",",","sequene_length",",","hidden_size",")",")","# output=[batch_size, sequence_length, hidden_size]","return","(","output",",","embedding_table",",","project_variable",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L448-L504"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"embedding_postprocessor","parameters":"(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1)","argument_list":"","return_statement":"return output","docstring":"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.","docstring_summary":"Performs various post-processing on a word embedding tensor.","docstring_tokens":["Performs","various","post","-","processing","on","a","word","embedding","tensor","."],"function":"def embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. 
The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1])\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n\n output = layer_norm_and_dropout(output, dropout_prob)\n return output","function_tokens":["def","embedding_postprocessor","(","input_tensor",",","use_token_type","=","False",",","token_type_ids","=","None",",","token_type_vocab_size","=","16",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","0.02",",","max_position_embeddings","=","512",",","dropout_prob","=","0.1",")",":","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","width","=","input_shape","[","2","]","output","=","input_tensor","if","use_token_type",":","if","token_type_ids","is","None",":","raise","ValueError","(","\"`token_type_ids` must be specified if\"","\"`use_token_type` is True.\"",")","token_type_table","=","tf",".","get_variable","(","name","=","token_type_embedding_name",",","shape","=","[","token_type_vocab_size",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# This vocab will be small so we always do one-hot here, since it is always","# faster for a small vocabulary.","flat_token_type_ids","=","tf",".","reshape","(","token_type_ids",",","[","-","1","]",")","one_hot_ids","=","tf",".","one_hot","(","flat_token_type_ids",",","depth","=","token_type_vocab_size",")","token_type_embeddings","=","tf",".","matmul","(","one_hot_ids",",","token_type_table",")","token_type_embeddings","=","tf",".","reshape","(","token_type_embeddings",",","[","batch_size",",","seq_length",",","width","]",")","output","+=","token_type_embeddings","if","use_position_embeddings",":","assert_op","=","tf",".","assert_less_equal","(","seq_length",",","max_position_embeddings",")","with","tf",".","control_dependencies","(","[","assert_op","]",")",":","full_position_embeddings","=","tf",".","get_variable","(","name","=","position_embedding_name",",","shape","=","[","max_position_embeddings",",","width","]",",","initializer","=","create_initializer","(","initializer_range",")",")","# Since the position embedding table is a learned variable, we create it","# using a (long) sequence length `max_position_embeddings`. The actual","# sequence length might be shorter than this, for faster training of","# tasks that do not have long sequences.","#","# So `full_position_embeddings` is effectively an embedding table","# for position [0, 1, 2, ..., max_position_embeddings-1], and the current","# sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just","# perform a slice.","position_embeddings","=","tf",".","slice","(","full_position_embeddings",",","[","0",",","0","]",",","[","seq_length",",","-","1","]",")","num_dims","=","len","(","output",".","shape",".","as_list","(",")",")","# Only the last two dimensions are relevant (`seq_length` and `width`), so","# we broadcast among the first dimensions, which is typically just","# the batch size.","position_broadcast_shape","=","[","]","for","_","in","range","(","num_dims","-","2",")",":","position_broadcast_shape",".","append","(","1",")","position_broadcast_shape",".","extend","(","[","seq_length",",","width","]",")","position_embeddings","=","tf",".","reshape","(","position_embeddings",",","position_broadcast_shape",")","output","+=","position_embeddings","output","=","layer_norm_and_dropout","(","output",",","dropout_prob",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L507-L600"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"create_attention_mask_from_input_mask","parameters":"(from_tensor, to_mask)","argument_list":"","return_statement":"return mask","docstring":"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].","docstring_summary":"Create 3D attention mask from a 2D tensor mask.","docstring_tokens":["Create","3D","attention","mask","from","a","2D","tensor","mask","."],"function":"def create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n # don't actually care if we attend *from* padding tokens (only *to* padding\n # tokens) so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask","function_tokens":["def","create_attention_mask_from_input_mask","(","from_tensor",",","to_mask",")",":","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_shape","=","get_shape_list","(","to_mask",",","expected_rank","=","2",")","to_seq_length","=","to_shape","[","1","]","to_mask","=","tf",".","cast","(","tf",".","reshape","(","to_mask",",","[","batch_size",",","1",",","to_seq_length","]",")",",","tf",".","float32",")","# We don't assume that `from_tensor` is a mask (although it could be). We","# don't actually care if we attend *from* padding tokens (only *to* padding","# tokens) so we create a tensor of all ones.","#","# `broadcast_ones` = [batch_size, from_seq_length, 1]","broadcast_ones","=","tf",".","ones","(","shape","=","[","batch_size",",","from_seq_length",",","1","]",",","dtype","=","tf",".","float32",")","# Here we broadcast along two dimensions to create the mask.","mask","=","broadcast_ones","*","to_mask","return","mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L603-L634"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"attention_layer","parameters":"(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None)","argument_list":"","return_statement":"return context_layer","docstring":"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n Is All You Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-width vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention is done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.","docstring_summary":"Performs multi-headed attention from `from_tensor` to `to_tensor`.","docstring_tokens":["Performs","multi","-","headed","attention","from","from_tensor","to","to_tensor","."],"function":"def attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n Is All You Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-width vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention is done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. 
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = 
transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 \/ math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer","function_tokens":["def","attention_layer","(","from_tensor",",","to_tensor",",","attention_mask","=","None",",","num_attention_heads","=","1",",","size_per_head","=","512",",","query_act","=","None",",","key_act","=","None",",","value_act","=","None",",","attention_probs_dropout_prob","=","0.0",",","initializer_range","=","0.02",",","do_return_2d_tensor","=","False",",","batch_size","=","None",",","from_seq_length","=","None",",","to_seq_length","=","None",")",":","def","transpose_for_scores","(","input_tensor",",","batch_size",",","num_attention_heads",",","seq_length",",","width",")",":","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","batch_size",",","seq_length",",","num_attention_heads",",","width","]",")","output_tensor","=","tf",".","transpose","(","output_tensor",",","[","0",",","2",",","1",",","3","]",")","return","output_tensor","from_shape","=","get_shape_list","(","from_tensor",",","expected_rank","=","[","2",",","3","]",")","to_shape","=","get_shape_list","(","to_tensor",",","expected_rank","=","[","2",",","3","]",")","if","len","(","from_shape",")","!=","len","(","to_shape",")",":","raise","ValueError","(","\"The rank of `from_tensor` must match the rank of 
`to_tensor`.\"",")","if","len","(","from_shape",")","==","3",":","batch_size","=","from_shape","[","0","]","from_seq_length","=","from_shape","[","1","]","to_seq_length","=","to_shape","[","1","]","elif","len","(","from_shape",")","==","2",":","if","(","batch_size","is","None","or","from_seq_length","is","None","or","to_seq_length","is","None",")",":","raise","ValueError","(","\"When passing in rank 2 tensors to attention_layer, the values \"","\"for `batch_size`, `from_seq_length`, and `to_seq_length` \"","\"must all be specified.\"",")","# Scalar dimensions referenced here:","# B = batch size (number of sequences)","# F = `from_tensor` sequence length","# T = `to_tensor` sequence length","# N = `num_attention_heads`","# H = `size_per_head`","from_tensor_2d","=","reshape_to_matrix","(","from_tensor",")","to_tensor_2d","=","reshape_to_matrix","(","to_tensor",")","# `query_layer` = [B*F, N*H]","query_layer","=","tf",".","layers",".","dense","(","from_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","query_act",",","name","=","\"query\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `key_layer` = [B*T, N*H]","key_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","key_act",",","name","=","\"key\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `value_layer` = [B*T, N*H]","value_layer","=","tf",".","layers",".","dense","(","to_tensor_2d",",","num_attention_heads","*","size_per_head",",","activation","=","value_act",",","name","=","\"value\"",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# `query_layer` = [B, N, F, H]","query_layer","=","transpose_for_scores","(","query_layer",",","batch_size",",","num_attention_heads",",","from_seq_length",",","size_per_head",")","# `key_layer` = [B, N, T, H]","key_layer","=","transpose_for_scores","(","key_layer",",","batch_size",",","num_attention_heads",",","to_seq_length",",","size_per_head",")","# Take the dot product between \"query\" and \"key\" to get the raw","# attention scores.","# `attention_scores` = [B, N, F, T]","attention_scores","=","tf",".","matmul","(","query_layer",",","key_layer",",","transpose_b","=","True",")","attention_scores","=","tf",".","multiply","(","attention_scores",",","1.0","\/","math",".","sqrt","(","float","(","size_per_head",")",")",")","if","attention_mask","is","not","None",":","# `attention_mask` = [B, 1, F, T]","attention_mask","=","tf",".","expand_dims","(","attention_mask",",","axis","=","[","1","]",")","# Since attention_mask is 1.0 for positions we want to attend and 0.0 for","# masked positions, this operation will create a tensor which is 0.0 for","# positions we want to attend and -10000.0 for masked positions.","adder","=","(","1.0","-","tf",".","cast","(","attention_mask",",","tf",".","float32",")",")","*","-","10000.0","# Since we are adding it to the raw scores before the softmax, this is","# effectively the same as removing these entirely.","attention_scores","+=","adder","# Normalize the attention scores to probabilities.","# `attention_probs` = [B, N, F, T]","attention_probs","=","tf",".","nn",".","softmax","(","attention_scores",")","# This is actually dropping out entire tokens to attend to, which might","# seem a bit unusual, but is taken from the original Transformer paper.","attention_probs","=","dropout","(","attention_probs",",","attention_probs_dropout_prob",")","# `value_layer` = 
[B, T, N, H]","value_layer","=","tf",".","reshape","(","value_layer",",","[","batch_size",",","to_seq_length",",","num_attention_heads",",","size_per_head","]",")","# `value_layer` = [B, N, T, H]","value_layer","=","tf",".","transpose","(","value_layer",",","[","0",",","2",",","1",",","3","]",")","# `context_layer` = [B, N, F, H]","context_layer","=","tf",".","matmul","(","attention_probs",",","value_layer",")","# `context_layer` = [B, F, N, H]","context_layer","=","tf",".","transpose","(","context_layer",",","[","0",",","2",",","1",",","3","]",")","if","do_return_2d_tensor",":","# `context_layer` = [B*F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size","*","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","else",":","# `context_layer` = [B, F, N*H]","context_layer","=","tf",".","reshape","(","context_layer",",","[","batch_size",",","from_seq_length",",","num_attention_heads","*","size_per_head","]",")","return","context_layer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L637-L830"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"transformer_model","parameters":"(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False,\n share_parameter_across_layers=True)","argument_list":"","return_statement":"","docstring":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. 
Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.","docstring_summary":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".","docstring_tokens":["Multi","-","headed","multi","-","layer","Transformer","from","Attention","is","All","You","Need","."],"function":"def transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False,\n share_parameter_across_layers=True):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https:\/\/arxiv.org\/abs\/1706.03762\n\n Also see:\n https:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate\/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size \/ num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on\n # the GPU\/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n if share_parameter_across_layers:\n name_variable_scope=\"layer_shared\"\n else:\n name_variable_scope=\"layer_%d\" % layer_idx\n # share all parameters across layers. added by brightmart, 2019-09-28; previously this was: \"layer_%d\" % layer_idx\n with tf.variable_scope(name_variable_scope, reuse=True if (share_parameter_across_layers and layer_idx>0) else False):\n\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output","function_tokens":["def","transformer_model","(","input_tensor",",","attention_mask","=","None",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","intermediate_act_fn","=","gelu",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","initializer_range","=","0.02",",","do_return_all_layers","=","False",",","share_parameter_across_layers","=","True",")",":","if","hidden_size","%","num_attention_heads","!=","0",":","raise","ValueError","(","\"The hidden size (%d) is not a multiple of the number of attention 
\"","\"heads (%d)\"","%","(","hidden_size",",","num_attention_heads",")",")","attention_head_size","=","int","(","hidden_size","\/","num_attention_heads",")","input_shape","=","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","input_width","=","input_shape","[","2","]","# The Transformer performs sum residuals on all layers so the input needs","# to be the same as the hidden size.","if","input_width","!=","hidden_size",":","raise","ValueError","(","\"The width of the input tensor (%d) != hidden size (%d)\"","%","(","input_width",",","hidden_size",")",")","# We keep the representation as a 2D tensor to avoid re-shaping it back and","# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on","# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to","# help the optimizer.","prev_output","=","reshape_to_matrix","(","input_tensor",")","all_layer_outputs","=","[","]","for","layer_idx","in","range","(","num_hidden_layers",")",":","if","share_parameter_across_layers",":","name_variable_scope","=","\"layer_shared\"","else",":","name_variable_scope","=","\"layer_%d\"","%","layer_idx","# share all parameters across layers. added by brightmart, 2019-09-28; previously this was: \"layer_%d\" % layer_idx","with","tf",".","variable_scope","(","name_variable_scope",",","reuse","=","True","if","(","share_parameter_across_layers","and","layer_idx",">","0",")","else","False",")",":","layer_input","=","prev_output","with","tf",".","variable_scope","(","\"attention\"",")",":","attention_heads","=","[","]","with","tf",".","variable_scope","(","\"self\"",")",":","attention_head","=","attention_layer","(","from_tensor","=","layer_input",",","to_tensor","=","layer_input",",","attention_mask","=","attention_mask",",","num_attention_heads","=","num_attention_heads",",","size_per_head","=","attention_head_size",",","attention_probs_dropout_prob","=","attention_probs_dropout_prob",",","initializer_range","=","initializer_range",",","do_return_2d_tensor","=","True",",","batch_size","=","batch_size",",","from_seq_length","=","seq_length",",","to_seq_length","=","seq_length",")","attention_heads",".","append","(","attention_head",")","attention_output","=","None","if","len","(","attention_heads",")","==","1",":","attention_output","=","attention_heads","[","0","]","else",":","# In the case where we have other sequences, we just concatenate","# them to the self-attention head before the projection.","attention_output","=","tf",".","concat","(","attention_heads",",","axis","=","-","1",")","# Run a linear projection of `hidden_size` then add a residual","# with `layer_input`.","with","tf",".","variable_scope","(","\"output\"",")",":","attention_output","=","tf",".","layers",".","dense","(","attention_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","attention_output","=","dropout","(","attention_output",",","hidden_dropout_prob",")","attention_output","=","layer_norm","(","attention_output","+","layer_input",")","# The activation is only applied to the \"intermediate\" hidden layer.","with","tf",".","variable_scope","(","\"intermediate\"",")",":","intermediate_output","=","tf",".","layers",".","dense","(","attention_output",",","intermediate_size",",","activation","=","intermediate_act_fn",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# Down-project back to `hidden_size` then add the 
residual.","with","tf",".","variable_scope","(","\"output\"",")",":","layer_output","=","tf",".","layers",".","dense","(","intermediate_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","layer_output","=","dropout","(","layer_output",",","hidden_dropout_prob",")","layer_output","=","layer_norm","(","layer_output","+","attention_output",")","prev_output","=","layer_output","all_layer_outputs",".","append","(","layer_output",")","if","do_return_all_layers",":","final_outputs","=","[","]","for","layer_output","in","all_layer_outputs",":","final_output","=","reshape_from_matrix","(","layer_output",",","input_shape",")","final_outputs",".","append","(","final_output",")","return","final_outputs","else",":","final_output","=","reshape_from_matrix","(","prev_output",",","input_shape",")","return","final_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L833-L978"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"get_shape_list","parameters":"(tensor, expected_rank=None, name=None)","argument_list":"","return_statement":"return shape","docstring":"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, an exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.","docstring_summary":"Returns a list of the shape of tensor, preferring static dimensions.","docstring_tokens":["Returns","a","list","of","the","shape","of","tensor","preferring","static","dimensions","."],"function":"def get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, an exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape","function_tokens":["def","get_shape_list","(","tensor",",","expected_rank","=","None",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","if","expected_rank","is","not","None",":","assert_rank","(","tensor",",","expected_rank",",","name",")","shape","=","tensor",".","shape",".","as_list","(",")","non_static_indexes","=","[","]","for","(","index",",","dim",")","in","enumerate","(","shape",")",":","if","dim","is","None",":","non_static_indexes",".","append","(","index",")","if","not","non_static_indexes",":","return","shape","dyn_shape","=","tf",".","shape","(","tensor",")","for","index","in","non_static_indexes",":","shape","[","index","]","=","dyn_shape","[","index","]","return","shape"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L981-L1015"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"reshape_to_matrix","parameters":"(input_tensor)","argument_list":"","return_statement":"return output_tensor","docstring":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_summary":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_tokens":["Reshapes","a",">","=","rank","2","tensor","to","a","rank","2","tensor","(","i",".","e",".","a","matrix",")","."],"function":"def reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor","function_tokens":["def","reshape_to_matrix","(","input_tensor",")",":","ndims","=","input_tensor",".","shape",".","ndims","if","ndims","<","2",":","raise","ValueError","(","\"Input tensor must have at least rank 2. 
Shape = %s\"","%","(","input_tensor",".","shape",")",")","if","ndims","==","2",":","return","input_tensor","width","=","input_tensor",".","shape","[","-","1","]","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","-","1",",","width","]",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L1018-L1029"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"reshape_from_matrix","parameters":"(output_tensor, orig_shape_list)","argument_list":"","return_statement":"return tf.reshape(output_tensor, orig_dims + [width])","docstring":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_summary":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_tokens":["Reshapes","a","rank","2","tensor","back","to","its","original","rank",">","=","2","tensor","."],"function":"def reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])","function_tokens":["def","reshape_from_matrix","(","output_tensor",",","orig_shape_list",")",":","if","len","(","orig_shape_list",")","==","2",":","return","output_tensor","output_shape","=","get_shape_list","(","output_tensor",")","orig_dims","=","orig_shape_list","[","0",":","-","1","]","width","=","output_shape","[","-","1","]","return","tf",".","reshape","(","output_tensor",",","orig_dims","+","[","width","]",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L1032-L1042"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"assert_rank","parameters":"(tensor, expected_rank, name=None)","argument_list":"","return_statement":"","docstring":"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.","docstring_summary":"Raises an exception if the tensor rank is not of the expected rank.","docstring_tokens":["Raises","an","exception","if","the","tensor","rank","is","not","of","the","expected","rank","."],"function":"def assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = 
tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))","function_tokens":["def","assert_rank","(","tensor",",","expected_rank",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","expected_rank_dict","=","{","}","if","isinstance","(","expected_rank",",","six",".","integer_types",")",":","expected_rank_dict","[","expected_rank","]","=","True","else",":","for","x","in","expected_rank",":","expected_rank_dict","[","x","]","=","True","actual_rank","=","tensor",".","shape",".","ndims","if","actual_rank","not","in","expected_rank_dict",":","scope_name","=","tf",".","get_variable_scope","(",")",".","name","raise","ValueError","(","\"For the tensor `%s` in scope `%s`, the actual rank \"","\"`%d` (shape = %s) is not equal to the expected rank `%s`\"","%","(","name",",","scope_name",",","actual_rank",",","str","(","tensor",".","shape",")",",","str","(","expected_rank",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L1045-L1072"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"prelln_transformer_model","parameters":"(input_tensor,\n\t\t\t\t\t\tattention_mask=None,\n\t\t\t\t\t\thidden_size=768,\n\t\t\t\t\t\tnum_hidden_layers=12,\n\t\t\t\t\t\tnum_attention_heads=12,\n\t\t\t\t\t\tintermediate_size=3072,\n\t\t\t\t\t\tintermediate_act_fn=gelu,\n\t\t\t\t\t\thidden_dropout_prob=0.1,\n\t\t\t\t\t\tattention_probs_dropout_prob=0.1,\n\t\t\t\t\t\tinitializer_range=0.02,\n\t\t\t\t\t\tdo_return_all_layers=False,\n\t\t\t\t\t\tshared_type='all', # None,\n\t\t\t\t\t\tadapter_fn=None)","argument_list":"","return_statement":"","docstring":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n\tThis is almost an exact implementation of the original Transformer encoder.\n\n\tSee the original paper:\n\thttps:\/\/arxiv.org\/abs\/1706.03762\n\n\tAlso see:\n\thttps:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n\tArgs:\n\t\tinput_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n\t\tattention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n\t\t\tseq_length], with 1 for positions that can be attended to and 0 in\n\t\t\tpositions that should not be.\n\t\thidden_size: int. Hidden size of the Transformer.\n\t\tnum_hidden_layers: int. Number of layers (blocks) in the Transformer.\n\t\tnum_attention_heads: int. Number of attention heads in the Transformer.\n\t\tintermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n\t\t\tforward) layer.\n\t\tintermediate_act_fn: function. The non-linear activation function to apply\n\t\t\tto the output of the intermediate\/feed-forward layer.\n\t\thidden_dropout_prob: float. Dropout probability for the hidden layers.\n\t\tattention_probs_dropout_prob: float. Dropout probability of the attention\n\t\t\tprobabilities.\n\t\tinitializer_range: float. 
Range of the initializer (stddev of truncated\n\t\t\tnormal).\n\t\tdo_return_all_layers: Whether to also return all layers or just the final\n\t\t\tlayer.\n\n\tReturns:\n\t\tfloat Tensor of shape [batch_size, seq_length, hidden_size], the final\n\t\thidden layer of the Transformer.\n\n\tRaises:\n\t\tValueError: A Tensor shape or parameter is invalid.","docstring_summary":"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".","docstring_tokens":["Multi","-","headed","multi","-","layer","Transformer","from","Attention","is","All","You","Need","."],"function":"def prelln_transformer_model(input_tensor,\n\t\t\t\t\t\tattention_mask=None,\n\t\t\t\t\t\thidden_size=768,\n\t\t\t\t\t\tnum_hidden_layers=12,\n\t\t\t\t\t\tnum_attention_heads=12,\n\t\t\t\t\t\tintermediate_size=3072,\n\t\t\t\t\t\tintermediate_act_fn=gelu,\n\t\t\t\t\t\thidden_dropout_prob=0.1,\n\t\t\t\t\t\tattention_probs_dropout_prob=0.1,\n\t\t\t\t\t\tinitializer_range=0.02,\n\t\t\t\t\t\tdo_return_all_layers=False,\n\t\t\t\t\t\tshared_type='all', # None,\n\t\t\t\t\t\tadapter_fn=None):\n\t\"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n\tThis is almost an exact implementation of the original Transformer encoder.\n\n\tSee the original paper:\n\thttps:\/\/arxiv.org\/abs\/1706.03762\n\n\tAlso see:\n\thttps:\/\/github.com\/tensorflow\/tensor2tensor\/blob\/master\/tensor2tensor\/models\/transformer.py\n\n\tArgs:\n\t\tinput_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n\t\tattention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n\t\t\tseq_length], with 1 for positions that can be attended to and 0 in\n\t\t\tpositions that should not be.\n\t\thidden_size: int. Hidden size of the Transformer.\n\t\tnum_hidden_layers: int. Number of layers (blocks) in the Transformer.\n\t\tnum_attention_heads: int. Number of attention heads in the Transformer.\n\t\tintermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n\t\t\tforward) layer.\n\t\tintermediate_act_fn: function. The non-linear activation function to apply\n\t\t\tto the output of the intermediate\/feed-forward layer.\n\t\thidden_dropout_prob: float. Dropout probability for the hidden layers.\n\t\tattention_probs_dropout_prob: float. Dropout probability of the attention\n\t\t\tprobabilities.\n\t\tinitializer_range: float. 
Range of the initializer (stddev of truncated\n\t\t\tnormal).\n\t\tdo_return_all_layers: Whether to also return all layers or just the final\n\t\t\tlayer.\n\n\tReturns:\n\t\tfloat Tensor of shape [batch_size, seq_length, hidden_size], the final\n\t\thidden layer of the Transformer.\n\n\tRaises:\n\t\tValueError: A Tensor shape or parameter is invalid.\n\t\"\"\"\n\tif hidden_size % num_attention_heads != 0:\n\t\traise ValueError(\n\t\t\t\t\"The hidden size (%d) is not a multiple of the number of attention \"\n\t\t\t\t\"heads (%d)\" % (hidden_size, num_attention_heads))\n\n\tattention_head_size = int(hidden_size \/ num_attention_heads)\n\n\tinput_shape = bert_utils.get_shape_list(input_tensor, expected_rank=3)\n\tbatch_size = input_shape[0]\n\tseq_length = input_shape[1]\n\tinput_width = input_shape[2]\n\n\t# The Transformer performs sum residuals on all layers so the input needs\n\t# to be the same as the hidden size.\n\tif input_width != hidden_size:\n\t\traise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n\t\t\t\t\t\t\t\t\t\t (input_width, hidden_size))\n\n\t# We keep the representation as a 2D tensor to avoid re-shaping it back and\n\t# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on\n\t# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to\n\t# help the optimizer.\n\tprev_output = bert_utils.reshape_to_matrix(input_tensor)\n\n\tall_layer_outputs = []\n\n\tdef layer_scope(idx, shared_type):\n\t\tif shared_type == 'all':\n\t\t\ttmp = {\n\t\t\t\t\"layer\":\"layer_shared\",\n\t\t\t\t'attention':'attention',\n\t\t\t\t'intermediate':'intermediate',\n\t\t\t\t'output':'output'\n\t\t\t}\n\t\telif shared_type == 'attention':\n\t\t\ttmp = {\n\t\t\t\t\"layer\":\"layer_shared\",\n\t\t\t\t'attention':'attention',\n\t\t\t\t'intermediate':'intermediate_{}'.format(idx),\n\t\t\t\t'output':'output_{}'.format(idx)\n\t\t\t}\n\t\telif shared_type == 'ffn':\n\t\t\ttmp = {\n\t\t\t\t\"layer\":\"layer_shared\",\n\t\t\t\t'attention':'attention_{}'.format(idx),\n\t\t\t\t'intermediate':'intermediate',\n\t\t\t\t'output':'output'\n\t\t\t}\n\t\telse:\n\t\t\ttmp = {\n\t\t\t\t\"layer\":\"layer_{}\".format(idx),\n\t\t\t\t'attention':'attention',\n\t\t\t\t'intermediate':'intermediate',\n\t\t\t\t'output':'output'\n\t\t\t}\n\n\t\treturn tmp\n\n\tall_layer_outputs = []\n\n\tfor layer_idx in range(num_hidden_layers):\n\n\t\tidx_scope = layer_scope(layer_idx, shared_type)\n\n\t\twith tf.variable_scope(idx_scope['layer'], reuse=tf.AUTO_REUSE):\n\t\t\tlayer_input = prev_output\n\n\t\t\twith tf.variable_scope(idx_scope['attention'], reuse=tf.AUTO_REUSE):\n\t\t\t\tattention_heads = []\n\n\t\t\t\twith tf.variable_scope(\"output\", reuse=tf.AUTO_REUSE):\n\t\t\t\t\tlayer_input_pre = layer_norm(layer_input)\n\n\t\t\t\twith tf.variable_scope(\"self\"):\n\t\t\t\t\tattention_head = attention_layer(\n\t\t\t\t\t\t\tfrom_tensor=layer_input_pre,\n\t\t\t\t\t\t\tto_tensor=layer_input_pre,\n\t\t\t\t\t\t\tattention_mask=attention_mask,\n\t\t\t\t\t\t\tnum_attention_heads=num_attention_heads,\n\t\t\t\t\t\t\tsize_per_head=attention_head_size,\n\t\t\t\t\t\t\tattention_probs_dropout_prob=attention_probs_dropout_prob,\n\t\t\t\t\t\t\tinitializer_range=initializer_range,\n\t\t\t\t\t\t\tdo_return_2d_tensor=True,\n\t\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\t\tfrom_seq_length=seq_length,\n\t\t\t\t\t\t\tto_seq_length=seq_length)\n\t\t\t\t\tattention_heads.append(attention_head)\n\n\t\t\t\tattention_output = None\n\t\t\t\tif len(attention_heads) == 1:\n\t\t\t\t\tattention_output = 
attention_heads[0]\n\t\t\t\telse:\n\t\t\t\t\t# In the case where we have other sequences, we just concatenate\n\t\t\t\t\t# them to the self-attention head before the projection.\n\t\t\t\t\tattention_output = tf.concat(attention_heads, axis=-1)\n\n\t\t\t\t# Run a linear projection of `hidden_size` then add a residual\n\t\t\t\t# with `layer_input`.\n\t\t\t\twith tf.variable_scope(\"output\", reuse=tf.AUTO_REUSE):\n\t\t\t\t\tattention_output = tf.layers.dense(\n\t\t\t\t\t\t\tattention_output,\n\t\t\t\t\t\t\thidden_size,\n\t\t\t\t\t\t\tkernel_initializer=create_initializer(initializer_range))\n\t\t\t\t\tattention_output = dropout(attention_output, hidden_dropout_prob)\n\n\t\t\t\t\t# attention_output = layer_norm(attention_output + layer_input)\n\t\t\t\t\tattention_output = attention_output + layer_input\n\n\t\t\twith tf.variable_scope(idx_scope['output'], reuse=tf.AUTO_REUSE):\n\t\t\t\tattention_output_pre = layer_norm(attention_output)\n\n\t\t\t# The activation is only applied to the \"intermediate\" hidden layer.\n\t\t\twith tf.variable_scope(idx_scope['intermediate'], reuse=tf.AUTO_REUSE):\n\t\t\t\tintermediate_output = tf.layers.dense(\n\t\t\t\t\t\tattention_output_pre,\n\t\t\t\t\t\tintermediate_size,\n\t\t\t\t\t\tactivation=intermediate_act_fn,\n\t\t\t\t\t\tkernel_initializer=create_initializer(initializer_range))\n\n\t\t\t# Down-project back to `hidden_size` then add the residual.\n\t\t\twith tf.variable_scope(idx_scope['output'], reuse=tf.AUTO_REUSE):\n\t\t\t\tlayer_output = tf.layers.dense(\n\t\t\t\t\t\tintermediate_output,\n\t\t\t\t\t\thidden_size,\n\t\t\t\t\t\tkernel_initializer=create_initializer(initializer_range))\n\t\t\t\tlayer_output = dropout(layer_output, hidden_dropout_prob)\n\n\t\t\t\t# layer_output = layer_norm(layer_output + attention_output)\n\t\t\t\tlayer_output = layer_output + attention_output\n\t\t\t\tprev_output = layer_output\n\t\t\t\tall_layer_outputs.append(layer_output)\n\n\tif do_return_all_layers:\n\t\tfinal_outputs = []\n\t\tfor layer_output in all_layer_outputs:\n\t\t\tfinal_output = bert_utils.reshape_from_matrix(layer_output, input_shape)\n\t\t\tfinal_outputs.append(final_output)\n\t\treturn final_outputs\n\telse:\n\t\tfinal_output = bert_utils.reshape_from_matrix(prev_output, input_shape)\n\t\treturn final_output","function_tokens":["def","prelln_transformer_model","(","input_tensor",",","attention_mask","=","None",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","intermediate_act_fn","=","gelu",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","initializer_range","=","0.02",",","do_return_all_layers","=","False",",","shared_type","=","'all'",",","# None,","adapter_fn","=","None",")",":","if","hidden_size","%","num_attention_heads","!=","0",":","raise","ValueError","(","\"The hidden size (%d) is not a multiple of the number of attention \"","\"heads (%d)\"","%","(","hidden_size",",","num_attention_heads",")",")","attention_head_size","=","int","(","hidden_size","\/","num_attention_heads",")","input_shape","=","bert_utils",".","get_shape_list","(","input_tensor",",","expected_rank","=","3",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","input_width","=","input_shape","[","2","]","# The Transformer performs sum residuals on all layers so the input needs","# to be the same as the hidden size.","if","input_width","!=","hidden_size",":","raise","ValueError","(","\"The width of the input tensor (%d) 
!= hidden size (%d)\"","%","(","input_width",",","hidden_size",")",")","# We keep the representation as a 2D tensor to avoid re-shaping it back and","# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on","# the GPU\/CPU but may not be free on the TPU, so we want to minimize them to","# help the optimizer.","prev_output","=","bert_utils",".","reshape_to_matrix","(","input_tensor",")","all_layer_outputs","=","[","]","def","layer_scope","(","idx",",","shared_type",")",":","if","shared_type","==","'all'",":","tmp","=","{","\"layer\"",":","\"layer_shared\"",",","'attention'",":","'attention'",",","'intermediate'",":","'intermediate'",",","'output'",":","'output'","}","elif","shared_type","==","'attention'",":","tmp","=","{","\"layer\"",":","\"layer_shared\"",",","'attention'",":","'attention'",",","'intermediate'",":","'intermediate_{}'",".","format","(","idx",")",",","'output'",":","'output_{}'",".","format","(","idx",")","}","elif","shared_type","==","'ffn'",":","tmp","=","{","\"layer\"",":","\"layer_shared\"",",","'attention'",":","'attention_{}'",".","format","(","idx",")",",","'intermediate'",":","'intermediate'",",","'output'",":","'output'","}","else",":","tmp","=","{","\"layer\"",":","\"layer_{}\"",".","format","(","idx",")",",","'attention'",":","'attention'",",","'intermediate'",":","'intermediate'",",","'output'",":","'output'","}","return","tmp","all_layer_outputs","=","[","]","for","layer_idx","in","range","(","num_hidden_layers",")",":","idx_scope","=","layer_scope","(","layer_idx",",","shared_type",")","with","tf",".","variable_scope","(","idx_scope","[","'layer'","]",",","reuse","=","tf",".","AUTO_REUSE",")",":","layer_input","=","prev_output","with","tf",".","variable_scope","(","idx_scope","[","'attention'","]",",","reuse","=","tf",".","AUTO_REUSE",")",":","attention_heads","=","[","]","with","tf",".","variable_scope","(","\"output\"",",","reuse","=","tf",".","AUTO_REUSE",")",":","layer_input_pre","=","layer_norm","(","layer_input",")","with","tf",".","variable_scope","(","\"self\"",")",":","attention_head","=","attention_layer","(","from_tensor","=","layer_input_pre",",","to_tensor","=","layer_input_pre",",","attention_mask","=","attention_mask",",","num_attention_heads","=","num_attention_heads",",","size_per_head","=","attention_head_size",",","attention_probs_dropout_prob","=","attention_probs_dropout_prob",",","initializer_range","=","initializer_range",",","do_return_2d_tensor","=","True",",","batch_size","=","batch_size",",","from_seq_length","=","seq_length",",","to_seq_length","=","seq_length",")","attention_heads",".","append","(","attention_head",")","attention_output","=","None","if","len","(","attention_heads",")","==","1",":","attention_output","=","attention_heads","[","0","]","else",":","# In the case where we have other sequences, we just concatenate","# them to the self-attention head before the projection.","attention_output","=","tf",".","concat","(","attention_heads",",","axis","=","-","1",")","# Run a linear projection of `hidden_size` then add a residual","# with `layer_input`.","with","tf",".","variable_scope","(","\"output\"",",","reuse","=","tf",".","AUTO_REUSE",")",":","attention_output","=","tf",".","layers",".","dense","(","attention_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","attention_output","=","dropout","(","attention_output",",","hidden_dropout_prob",")","# attention_output = layer_norm(attention_output + 
layer_input)","attention_output","=","attention_output","+","layer_input","with","tf",".","variable_scope","(","idx_scope","[","'output'","]",",","reuse","=","tf",".","AUTO_REUSE",")",":","attention_output_pre","=","layer_norm","(","attention_output",")","# The activation is only applied to the \"intermediate\" hidden layer.","with","tf",".","variable_scope","(","idx_scope","[","'intermediate'","]",",","reuse","=","tf",".","AUTO_REUSE",")",":","intermediate_output","=","tf",".","layers",".","dense","(","attention_output_pre",",","intermediate_size",",","activation","=","intermediate_act_fn",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","# Down-project back to `hidden_size` then add the residual.","with","tf",".","variable_scope","(","idx_scope","[","'output'","]",",","reuse","=","tf",".","AUTO_REUSE",")",":","layer_output","=","tf",".","layers",".","dense","(","intermediate_output",",","hidden_size",",","kernel_initializer","=","create_initializer","(","initializer_range",")",")","layer_output","=","dropout","(","layer_output",",","hidden_dropout_prob",")","# layer_output = layer_norm(layer_output + attention_output)","layer_output","=","layer_output","+","attention_output","prev_output","=","layer_output","all_layer_outputs",".","append","(","layer_output",")","if","do_return_all_layers",":","final_outputs","=","[","]","for","layer_output","in","all_layer_outputs",":","final_output","=","bert_utils",".","reshape_from_matrix","(","layer_output",",","input_shape",")","final_outputs",".","append","(","final_output",")","return","final_outputs","else",":","final_output","=","bert_utils",".","reshape_from_matrix","(","prev_output",",","input_shape",")","return","final_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L1074-L1264"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"BertConfig.__init__","parameters":"(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range","function_tokens":["def","__init__","(","self",",","vocab_size",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","16",",","initializer_range","=","0.02",")",":","self",".","vocab_size","=","vocab_size","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L34-L80"} 
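Note: the BertConfig record above is the configuration object consumed by the BertModel constructor later in this file. A minimal usage sketch, assuming baselines/models/albert/modeling.py from this repo is importable as `modeling`; the vocabulary size below is illustrative, not taken from any released checkpoint:

from baselines.models.albert import modeling

# Only vocab_size is required; all other fields keep the documented defaults
# (hidden_size=768, num_hidden_layers=12, num_attention_heads=12, ...).
config = modeling.BertConfig(vocab_size=21128)  # illustrative vocab size

# transformer_model() above rejects configs where hidden_size is not a
# multiple of num_attention_heads; the defaults satisfy it (768 / 12 = 64).
assert config.hidden_size % config.num_attention_heads == 0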
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"BertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size","=","None",")","for","(","key",",","value",")","in","six",".","iteritems","(","json_object",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L83-L88"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"BertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","tf",".","gfile",".","GFile","(","json_file",",","\"r\"",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L91-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"BertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L97-L100"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"BertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L102-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"BertModel.__init__","parameters":"(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None)","argument_list":"","return_statement":"","docstring":"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.","docstring_summary":"Constructor for BertModel.","docstring_tokens":["Constructor","for","BertModel","."],"function":"def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids, but use stype of factorized embedding parameterization from albert. add by brightmart, 2019-09-28\n (self.embedding_output, self.embedding_table,self.embedding_table_2) = embedding_lookup_factorized(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n hidden_size=config.hidden_size,\n embedding_size=config.embedding_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n ln_type=config.ln_type\n print(\"ln_type:\",ln_type)\n if ln_type=='postln' or ln_type is None: # currently, base or large of albert used post-LN structure\n print(\"old structure of transformer.use: transformer_model,which use post-LN\")\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n else: # xlarge or xxlarge of albert, used pre-LN structure\n print(\"new structure of transformer.use: prelln_transformer_model,which use pre-LN\")\n self.all_encoder_layers = prelln_transformer_model( # change by brightmart, 4th, oct, 2019. pre-Layer Normalization can converge fast and better. 
check paper: ON LAYER NORMALIZATION IN THE TRANSFORMER ARCHITECTURE\n                input_tensor=self.embedding_output,\n                attention_mask=attention_mask,\n                hidden_size=config.hidden_size,\n                num_hidden_layers=config.num_hidden_layers,\n                num_attention_heads=config.num_attention_heads,\n                intermediate_size=config.intermediate_size,\n                intermediate_act_fn=get_activation(config.hidden_act),\n                hidden_dropout_prob=config.hidden_dropout_prob,\n                attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n                initializer_range=config.initializer_range,\n                do_return_all_layers=True,\n                shared_type='all') # do_return_all_layers=True\n\n      self.sequence_output = self.all_encoder_layers[-1] # [batch_size, seq_length, hidden_size]\n      # The \"pooler\" converts the encoded sequence tensor of shape\n      # [batch_size, seq_length, hidden_size] to a tensor of shape\n      # [batch_size, hidden_size]. This is necessary for segment-level\n      # (or segment-pair-level) classification tasks where we need a fixed\n      # dimensional representation of the segment.\n      with tf.variable_scope(\"pooler\"):\n        # We \"pool\" the model by simply taking the hidden state corresponding\n        # to the first token. We assume that this has been pre-trained\n        first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n        self.pooled_output = tf.layers.dense(\n            first_token_tensor,\n            config.hidden_size,\n            activation=tf.tanh,\n            kernel_initializer=create_initializer(config.initializer_range))","function_tokens":["def","__init__","(","self",",","config",",","is_training",",","input_ids",",","input_mask","=","None",",","token_type_ids","=","None",",","use_one_hot_embeddings","=","False",",","scope","=","None",")",":","config","=","copy",".","deepcopy","(","config",")","if","not","is_training",":","config",".","hidden_dropout_prob","=","0.0","config",".","attention_probs_dropout_prob","=","0.0","input_shape","=","get_shape_list","(","input_ids",",","expected_rank","=","2",")","batch_size","=","input_shape","[","0","]","seq_length","=","input_shape","[","1","]","if","input_mask","is","None",":","input_mask","=","tf",".","ones","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","if","token_type_ids","is","None",":","token_type_ids","=","tf",".","zeros","(","shape","=","[","batch_size",",","seq_length","]",",","dtype","=","tf",".","int32",")","with","tf",".","variable_scope","(","scope",",","default_name","=","\"bert\"",")",":","with","tf",".","variable_scope","(","\"embeddings\"",")",":","# Perform embedding lookup on the word ids, but use style of factorized embedding parameterization from albert. 
add by brightmart, 2019-09-28","(","self",".","embedding_output",",","self",".","embedding_table",",","self",".","embedding_table_2",")","=","embedding_lookup_factorized","(","input_ids","=","input_ids",",","vocab_size","=","config",".","vocab_size",",","hidden_size","=","config",".","hidden_size",",","embedding_size","=","config",".","embedding_size",",","initializer_range","=","config",".","initializer_range",",","word_embedding_name","=","\"word_embeddings\"",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# Add positional embeddings and token type embeddings, then layer","# normalize and perform dropout.","self",".","embedding_output","=","embedding_postprocessor","(","input_tensor","=","self",".","embedding_output",",","use_token_type","=","True",",","token_type_ids","=","token_type_ids",",","token_type_vocab_size","=","config",".","type_vocab_size",",","token_type_embedding_name","=","\"token_type_embeddings\"",",","use_position_embeddings","=","True",",","position_embedding_name","=","\"position_embeddings\"",",","initializer_range","=","config",".","initializer_range",",","max_position_embeddings","=","config",".","max_position_embeddings",",","dropout_prob","=","config",".","hidden_dropout_prob",")","with","tf",".","variable_scope","(","\"encoder\"",")",":","# This converts a 2D mask of shape [batch_size, seq_length] to a 3D","# mask of shape [batch_size, seq_length, seq_length] which is used","# for the attention scores.","attention_mask","=","create_attention_mask_from_input_mask","(","input_ids",",","input_mask",")","# Run the stacked transformer.","# `sequence_output` shape = [batch_size, seq_length, hidden_size].","ln_type","=","config",".","ln_type","print","(","\"ln_type:\"",",","ln_type",")","if","ln_type","==","'postln'","or","ln_type","is","None",":","# currently, base or large of albert used post-LN structure","print","(","\"old structure of transformer.use: transformer_model,which use post-LN\"",")","self",".","all_encoder_layers","=","transformer_model","(","input_tensor","=","self",".","embedding_output",",","attention_mask","=","attention_mask",",","hidden_size","=","config",".","hidden_size",",","num_hidden_layers","=","config",".","num_hidden_layers",",","num_attention_heads","=","config",".","num_attention_heads",",","intermediate_size","=","config",".","intermediate_size",",","intermediate_act_fn","=","get_activation","(","config",".","hidden_act",")",",","hidden_dropout_prob","=","config",".","hidden_dropout_prob",",","attention_probs_dropout_prob","=","config",".","attention_probs_dropout_prob",",","initializer_range","=","config",".","initializer_range",",","do_return_all_layers","=","True",")","else",":","# xlarge or xxlarge of albert, used pre-LN structure","print","(","\"new structure of transformer.use: prelln_transformer_model,which use pre-LN\"",")","self",".","all_encoder_layers","=","prelln_transformer_model","(","# change by brightmart, 4th, oct, 2019. pre-Layer Normalization can converge fast and better. 
check paper: ON LAYER NORMALIZATION IN THE TRANSFORMER ARCHITECTURE","input_tensor","=","self",".","embedding_output",",","attention_mask","=","attention_mask",",","hidden_size","=","config",".","hidden_size",",","num_hidden_layers","=","config",".","num_hidden_layers",",","num_attention_heads","=","config",".","num_attention_heads",",","intermediate_size","=","config",".","intermediate_size",",","intermediate_act_fn","=","get_activation","(","config",".","hidden_act",")",",","hidden_dropout_prob","=","config",".","hidden_dropout_prob",",","attention_probs_dropout_prob","=","config",".","attention_probs_dropout_prob",",","initializer_range","=","config",".","initializer_range",",","do_return_all_layers","=","True",",","shared_type","=","'all'",")","# do_return_all_layers=True","self",".","sequence_output","=","self",".","all_encoder_layers","[","-","1","]","# [batch_size, seq_length, hidden_size]","# The \"pooler\" converts the encoded sequence tensor of shape","# [batch_size, seq_length, hidden_size] to a tensor of shape","# [batch_size, hidden_size]. This is necessary for segment-level","# (or segment-pair-level) classification tasks where we need a fixed","# dimensional representation of the segment.","with","tf",".","variable_scope","(","\"pooler\"",")",":","# We \"pool\" the model by simply taking the hidden state corresponding","# to the first token. We assume that this has been pre-trained","first_token_tensor","=","tf",".","squeeze","(","self",".","sequence_output","[",":",",","0",":","1",",",":","]",",","axis","=","1",")","self",".","pooled_output","=","tf",".","layers",".","dense","(","first_token_tensor",",","config",".","hidden_size",",","activation","=","tf",".","tanh",",","kernel_initializer","=","create_initializer","(","config",".","initializer_range",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L131-L252"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"BertModel.get_sequence_output","parameters":"(self)","argument_list":"","return_statement":"return self.sequence_output","docstring":"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.","docstring_summary":"Gets final hidden layer of encoder.","docstring_tokens":["Gets","final","hidden","layer","of","encoder","."],"function":"def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output","function_tokens":["def","get_sequence_output","(","self",")",":","return","self",".","sequence_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L257-L264"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/modeling.py","language":"python","identifier":"BertModel.get_embedding_output","parameters":"(self)","argument_list":"","return_statement":"return self.embedding_output","docstring":"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the 
embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.","docstring_summary":"Gets output of the embedding lookup (i.e., input to the transformer).","docstring_tokens":["Gets","output","of","the","embedding","lookup","(","i",".","e",".","input","to","the","transformer",")","."],"function":"def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output","function_tokens":["def","get_embedding_output","(","self",")",":","return","self",".","embedding_output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/modeling.py#L269-L278"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"convert_example_list_for_inews","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature_list","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_example_list_for_inews(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return [InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)]\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n must_len = len(tokens_a) + 3\n extra_len = max_seq_length - must_len\n feature_list = []\n if example.text_b and extra_len > 0:\n extra_num = int((len(tokens_b) - 1) \/ extra_len) + 1\n for num in range(extra_num):\n max_len = min((num + 1) * extra_len, len(tokens_b))\n tokens_b_sub = tokens_b[num * extra_len: max_len]\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b_sub, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n else:\n feature = convert_single_example_for_inews(\n ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)\n feature_list.append(feature)\n return 
feature_list","function_tokens":["def","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","[","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","]","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","must_len","=","len","(","tokens_a",")","+","3","extra_len","=","max_seq_length","-","must_len","feature_list","=","[","]","if","example",".","text_b","and","extra_len",">","0",":","extra_num","=","int","(","(","len","(","tokens_b",")","-","1",")","\/","extra_len",")","+","1","for","num","in","range","(","extra_num",")",":","max_len","=","min","(","(","num","+","1",")","*","extra_len",",","len","(","tokens_b",")",")","tokens_b_sub","=","tokens_b","[","num","*","extra_len",":","max_len","]","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b_sub",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","else",":","feature","=","convert_single_example_for_inews","(","ex_index",",","tokens_a",",","tokens_b",",","label_map",",","max_seq_length",",","tokenizer",",","example",")","feature_list",".","append","(","feature",")","return","feature_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L234-L269"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features_for_inews","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features_for_inews(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n num_example = 0\n for (ex_index, example) in enumerate(examples):\n if ex_index % 1000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature_list = convert_example_list_for_inews(ex_index, example, label_list,\n max_seq_length, tokenizer)\n num_example += len(feature_list)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n for feature in feature_list:\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n 
features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n tf.logging.info(\"feature num: %s\", num_example)\n writer.close()","function_tokens":["def","file_based_convert_examples_to_features_for_inews","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","num_example","=","0","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","1000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature_list","=","convert_example_list_for_inews","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","num_example","+=","len","(","feature_list",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","for","feature","in","feature_list",":","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","tf",".","logging",".","info","(","\"feature num: %s\"",",","num_example",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L272-L302"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"convert_single_example","parameters":"(ex_index, example, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return feature","docstring":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_summary":"Converts a single `InputExample` into a single `InputFeatures`.","docstring_tokens":["Converts","a","single","InputExample","into","a","single","InputFeatures","."],"function":"def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n 
# length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return 
feature","function_tokens":["def","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")",":","if","isinstance","(","example",",","PaddingInputExample",")",":","return","InputFeatures","(","input_ids","=","[","0","]","*","max_seq_length",",","input_mask","=","[","0","]","*","max_seq_length",",","segment_ids","=","[","0","]","*","max_seq_length",",","label_id","=","0",",","is_real_example","=","False",")","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","None","if","example",".","text_b",":","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","if","tokens_b",":","# Modifies `tokens_a` and `tokens_b` in place so that the total","# length is less than the specified length.","# Account for [CLS], [SEP], [SEP] with \"- 3\"","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_seq_length","-","3",")","else",":","# Account for [CLS] and [SEP] with \"- 2\"","if","len","(","tokens_a",")",">","max_seq_length","-","2",":","tokens_a","=","tokens_a","[","0",":","(","max_seq_length","-","2",")","]","# The convention in BERT is:","# (a) For sequence pairs:","# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]","# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1","# (b) For single sequences:","# tokens: [CLS] the dog is hairy . [SEP]","# type_ids: 0 0 0 0 0 0 0","#","# Where \"type_ids\" are used to indicate whether this is the first","# sequence or the second sequence. The embedding vectors for `type=0` and","# `type=1` were learned during pre-training and are added to the wordpiece","# embedding vector (and position vector). This is not *strictly* necessary","# since the [SEP] token unambiguously separates the sequences, but it makes","# it easier for the model to learn the concept of sequences.","#","# For classification tasks, the first vector (corresponding to [CLS]) is","# used as the \"sentence vector\". Note that this only makes sense because","# the entire model is fine-tuned.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","label_id","=","label_map","[","example",".","label","]","if","ex_index","<","5",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","tf",".","logging",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","tf",".","logging",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","tf",".","logging",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","tf",".","logging",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label_id",")",")","feature","=","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",",","is_real_example","=","True",")","return","feature"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L305-L404"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"file_based_convert_examples_to_features","parameters":"(\n examples, label_list, max_seq_length, tokenizer, output_file)","argument_list":"","return_statement":"","docstring":"Convert a set of `InputExample`s to a TFRecord file.","docstring_summary":"Convert a set of `InputExample`s to a TFRecord file.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","TFRecord","file","."],"function":"def file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n 
writer.close()","function_tokens":["def","file_based_convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",",","output_file",")",":","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","def","create_int_feature","(","values",")",":","f","=","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","list","(","values",")",")",")","return","f","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","feature",".","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","feature",".","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","feature",".","segment_ids",")","features","[","\"label_ids\"","]","=","create_int_feature","(","[","feature",".","label_id","]",")","features","[","\"is_real_example\"","]","=","create_int_feature","(","[","int","(","feature",".","is_real_example",")","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writer",".","write","(","tf_example",".","SerializeToString","(",")",")","writer",".","close","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L407-L434"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"file_based_input_fn_builder","parameters":"(input_file, seq_length, is_training,\n drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = 
tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn","function_tokens":["def","file_based_input_fn_builder","(","input_file",",","seq_length",",","is_training",",","drop_remainder",")",":","name_to_features","=","{","\"input_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"input_mask\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"segment_ids\"",":","tf",".","FixedLenFeature","(","[","seq_length","]",",","tf",".","int64",")",",","\"label_ids\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","\"is_real_example\"",":","tf",".","FixedLenFeature","(","[","]",",","tf",".","int64",")",",","}","def","_decode_record","(","record",",","name_to_features",")",":","\"\"\"Decodes a record to a TensorFlow example.\"\"\"","example","=","tf",".","parse_single_example","(","record",",","name_to_features",")","# tf.Example only supports tf.int64, but the TPU only supports tf.int32.","# So cast all int64 to int32.","for","name","in","list","(","example",".","keys","(",")",")",":","t","=","example","[","name","]","if","t",".","dtype","==","tf",".","int64",":","t","=","tf",".","to_int32","(","t",")","example","[","name","]","=","t","return","example","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","# For training, we want a lot of parallel reading and shuffling.","# For eval, we want no shuffling and parallel reading doesn't matter.","d","=","tf",".","data",".","TFRecordDataset","(","input_file",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","apply","(","tf",".","contrib",".","data",".","map_and_batch","(","lambda","record",":","_decode_record","(","record",",","name_to_features",")",",","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L437-L482"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n  # of tokens from each, since if one sequence is very short then each token\n  # that's truncated likely contains more information than a longer sequence.\n  while True:\n    total_length = len(tokens_a) + len(tokens_b)\n    if total_length <= max_length:\n      break\n    if len(tokens_a) > len(tokens_b):\n      tokens_a.pop()\n    else:\n      tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L485-L499"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"create_model","parameters":"(bert_config, is_training, input_ids, input_mask, segment_ids,\n                 labels, num_labels, use_one_hot_embeddings)","argument_list":"","return_statement":"","docstring":"Creates a classification model.","docstring_summary":"Creates a classification model.","docstring_tokens":["Creates","a","classification","model","."],"function":"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n                 labels, num_labels, use_one_hot_embeddings):\n  \"\"\"Creates a classification model.\"\"\"\n  model = modeling.BertModel(\n      config=bert_config,\n      is_training=is_training,\n      input_ids=input_ids,\n      input_mask=input_mask,\n      token_type_ids=segment_ids,\n      use_one_hot_embeddings=use_one_hot_embeddings)\n\n  # In the demo, we are doing a simple classification task on the entire\n  # segment.\n  #\n  # If you want to use the token-level output, use model.get_sequence_output()\n  # instead.\n  output_layer = model.get_pooled_output()\n\n  hidden_size = output_layer.shape[-1].value\n\n  output_weights = tf.get_variable(\n      \"output_weights\", [num_labels, hidden_size],\n      initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n  output_bias = tf.get_variable(\n      \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n  with tf.variable_scope(\"loss\"):\n    ln_type = bert_config.ln_type\n    if ln_type == 'preln':  # add by brightmart, 10-06. if it is preln, we need to add an additional layer: layer normalization as suggested in paper \"ON LAYER NORMALIZATION IN THE TRANSFORMER ARCHITECTURE\"\n      print(\"ln_type is preln. 
add LN layer.\")\n output_layer = layer_norm(output_layer)\n else:\n print(\"ln_type is postln or other,do nothing.\")\n\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs,\n axis=-1) # todo 08-29 try temp-loss\n ###############bi_tempered_logistic_loss############################################################################\n # print(\"##cross entropy loss is used....\"); tf.logging.info(\"##cross entropy loss is used....\")\n # t1=0.9 #t1=0.90\n # t2=1.05 #t2=1.05\n # per_example_loss=bi_tempered_logistic_loss(log_probs,one_hot_labels,t1,t2,label_smoothing=0.1,num_iters=5) # TODO label_smoothing=0.0\n # tf.logging.info(\"per_example_loss:\"+str(per_example_loss.shape))\n ##############bi_tempered_logistic_loss#############################################################################\n\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)","function_tokens":["def","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","labels",",","num_labels",",","use_one_hot_embeddings",")",":","model","=","modeling",".","BertModel","(","config","=","bert_config",",","is_training","=","is_training",",","input_ids","=","input_ids",",","input_mask","=","input_mask",",","token_type_ids","=","segment_ids",",","use_one_hot_embeddings","=","use_one_hot_embeddings",")","# In the demo, we are doing a simple classification task on the entire","# segment.","#","# If you want to use the token-level output, use model.get_sequence_output()","# instead.","output_layer","=","model",".","get_pooled_output","(",")","hidden_size","=","output_layer",".","shape","[","-","1","]",".","value","output_weights","=","tf",".","get_variable","(","\"output_weights\"",",","[","num_labels",",","hidden_size","]",",","initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","0.02",")",")","output_bias","=","tf",".","get_variable","(","\"output_bias\"",",","[","num_labels","]",",","initializer","=","tf",".","zeros_initializer","(",")",")","with","tf",".","variable_scope","(","\"loss\"",")",":","ln_type","=","bert_config",".","ln_type","if","ln_type","==","'preln'",":","# add by brightmart, 10-06. if it is preln, we need to an additonal layer: layer normalization as suggested in paper \"ON LAYER NORMALIZATION IN THE TRANSFORMER ARCHITECTURE\"","print","(","\"ln_type is preln. 
add LN layer.\"",")","output_layer","=","layer_norm","(","output_layer",")","else",":","print","(","\"ln_type is postln or other,do nothing.\"",")","if","is_training",":","# I.e., 0.1 dropout","output_layer","=","tf",".","nn",".","dropout","(","output_layer",",","keep_prob","=","0.9",")","logits","=","tf",".","matmul","(","output_layer",",","output_weights",",","transpose_b","=","True",")","logits","=","tf",".","nn",".","bias_add","(","logits",",","output_bias",")","probabilities","=","tf",".","nn",".","softmax","(","logits",",","axis","=","-","1",")","log_probs","=","tf",".","nn",".","log_softmax","(","logits",",","axis","=","-","1",")","one_hot_labels","=","tf",".","one_hot","(","labels",",","depth","=","num_labels",",","dtype","=","tf",".","float32",")","per_example_loss","=","-","tf",".","reduce_sum","(","one_hot_labels","*","log_probs",",","axis","=","-","1",")","# todo 08-29 try temp-loss","###############bi_tempered_logistic_loss############################################################################","# print(\"##cross entropy loss is used....\"); tf.logging.info(\"##cross entropy loss is used....\")","# t1=0.9 #t1=0.90","# t2=1.05 #t2=1.05","# per_example_loss=bi_tempered_logistic_loss(log_probs,one_hot_labels,t1,t2,label_smoothing=0.1,num_iters=5) # TODO label_smoothing=0.0","# tf.logging.info(\"per_example_loss:\"+str(per_example_loss.shape))","##############bi_tempered_logistic_loss#############################################################################","loss","=","tf",".","reduce_mean","(","per_example_loss",")","return","(","loss",",","per_example_loss",",","logits",",","probabilities",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L502-L560"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"layer_norm","parameters":"(input_tensor, name=None)","argument_list":"","return_statement":"return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","docstring":"Run layer normalization on the last dimension of the tensor.","docstring_summary":"Run layer normalization on the last dimension of the tensor.","docstring_tokens":["Run","layer","normalization","on","the","last","dimension","of","the","tensor","."],"function":"def layer_norm(input_tensor, name=None):\n \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n return tf.contrib.layers.layer_norm(\n inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)","function_tokens":["def","layer_norm","(","input_tensor",",","name","=","None",")",":","return","tf",".","contrib",".","layers",".","layer_norm","(","inputs","=","input_tensor",",","begin_norm_axis","=","-","1",",","begin_params_axis","=","-","1",",","scope","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L563-L566"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"model_fn_builder","parameters":"(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings)","argument_list":"","return_statement":"return model_fn","docstring":"Returns `model_fn` closure for 
TPUEstimator.","docstring_summary":"Returns `model_fn` closure for TPUEstimator.","docstring_tokens":["Returns","model_fn","closure","for","TPUEstimator","."],"function":"def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn","function_tokens":["def","model_fn_builder","(","bert_config",",","num_labels",",","init_checkpoint",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",",","use_one_hot_embeddings",")",":","def","model_fn","(","features",",","labels",",","mode",",","params",")",":","# pylint: disable=unused-argument","\"\"\"The `model_fn` for 
TPUEstimator.\"\"\"","tf",".","logging",".","info","(","\"*** Features ***\"",")","for","name","in","sorted","(","features",".","keys","(",")",")",":","tf",".","logging",".","info","(","\" name = %s, shape = %s\"","%","(","name",",","features","[","name","]",".","shape",")",")","input_ids","=","features","[","\"input_ids\"","]","input_mask","=","features","[","\"input_mask\"","]","segment_ids","=","features","[","\"segment_ids\"","]","label_ids","=","features","[","\"label_ids\"","]","is_real_example","=","None","if","\"is_real_example\"","in","features",":","is_real_example","=","tf",".","cast","(","features","[","\"is_real_example\"","]",",","dtype","=","tf",".","float32",")","else",":","is_real_example","=","tf",".","ones","(","tf",".","shape","(","label_ids",")",",","dtype","=","tf",".","float32",")","is_training","=","(","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",")","(","total_loss",",","per_example_loss",",","logits",",","probabilities",")","=","create_model","(","bert_config",",","is_training",",","input_ids",",","input_mask",",","segment_ids",",","label_ids",",","num_labels",",","use_one_hot_embeddings",")","tvars","=","tf",".","trainable_variables","(",")","initialized_variable_names","=","{","}","scaffold_fn","=","None","if","init_checkpoint",":","(","assignment_map",",","initialized_variable_names",")","=","modeling",".","get_assignment_map_from_checkpoint","(","tvars",",","init_checkpoint",")","if","use_tpu",":","def","tpu_scaffold","(",")",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","return","tf",".","train",".","Scaffold","(",")","scaffold_fn","=","tpu_scaffold","else",":","tf",".","train",".","init_from_checkpoint","(","init_checkpoint",",","assignment_map",")","tf",".","logging",".","info","(","\"**** Trainable Variables ****\"",")","for","var","in","tvars",":","init_string","=","\"\"","if","var",".","name","in","initialized_variable_names",":","init_string","=","\", *INIT_FROM_CKPT*\"","tf",".","logging",".","info","(","\" name = %s, shape = 
%s%s\"",",","var",".","name",",","var",".","shape",",","init_string",")","output_spec","=","None","if","mode","==","tf",".","estimator",".","ModeKeys",".","TRAIN",":","train_op","=","optimization",".","create_optimizer","(","total_loss",",","learning_rate",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","train_op","=","train_op",",","scaffold_fn","=","scaffold_fn",")","elif","mode","==","tf",".","estimator",".","ModeKeys",".","EVAL",":","def","metric_fn","(","per_example_loss",",","label_ids",",","logits",",","is_real_example",")",":","predictions","=","tf",".","argmax","(","logits",",","axis","=","-","1",",","output_type","=","tf",".","int32",")","accuracy","=","tf",".","metrics",".","accuracy","(","labels","=","label_ids",",","predictions","=","predictions",",","weights","=","is_real_example",")","loss","=","tf",".","metrics",".","mean","(","values","=","per_example_loss",",","weights","=","is_real_example",")","return","{","\"eval_accuracy\"",":","accuracy",",","\"eval_loss\"",":","loss",",","}","eval_metrics","=","(","metric_fn",",","[","per_example_loss",",","label_ids",",","logits",",","is_real_example","]",")","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","loss","=","total_loss",",","eval_metrics","=","eval_metrics",",","scaffold_fn","=","scaffold_fn",")","else",":","output_spec","=","tf",".","contrib",".","tpu",".","TPUEstimatorSpec","(","mode","=","mode",",","predictions","=","{","\"probabilities\"",":","probabilities","}",",","scaffold_fn","=","scaffold_fn",")","return","output_spec","return","model_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L569-L658"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"input_fn_builder","parameters":"(features, seq_length, is_training, drop_remainder)","argument_list":"","return_statement":"return input_fn","docstring":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_summary":"Creates an `input_fn` closure to be passed to TPUEstimator.","docstring_tokens":["Creates","an","input_fn","closure","to","be","passed","to","TPUEstimator","."],"function":"def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn","function_tokens":["def","input_fn_builder","(","features",",","seq_length",",","is_training",",","drop_remainder",")",":","all_input_ids","=","[","]","all_input_mask","=","[","]","all_segment_ids","=","[","]","all_label_ids","=","[","]","for","feature","in","features",":","all_input_ids",".","append","(","feature",".","input_ids",")","all_input_mask",".","append","(","feature",".","input_mask",")","all_segment_ids",".","append","(","feature",".","segment_ids",")","all_label_ids",".","append","(","feature",".","label_id",")","def","input_fn","(","params",")",":","\"\"\"The actual input function.\"\"\"","batch_size","=","params","[","\"batch_size\"","]","num_examples","=","len","(","features",")","# This is for demo purposes and does NOT scale to large data sets. We do","# not use Dataset.from_generator() because that uses tf.py_func which is","# not TPU compatible. The right way to load data is with TFRecordReader.","d","=","tf",".","data",".","Dataset",".","from_tensor_slices","(","{","\"input_ids\"",":","tf",".","constant","(","all_input_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"input_mask\"",":","tf",".","constant","(","all_input_mask",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"segment_ids\"",":","tf",".","constant","(","all_segment_ids",",","shape","=","[","num_examples",",","seq_length","]",",","dtype","=","tf",".","int32",")",",","\"label_ids\"",":","tf",".","constant","(","all_label_ids",",","shape","=","[","num_examples","]",",","dtype","=","tf",".","int32",")",",","}",")","if","is_training",":","d","=","d",".","repeat","(",")","d","=","d",".","shuffle","(","buffer_size","=","100",")","d","=","d",".","batch","(","batch_size","=","batch_size",",","drop_remainder","=","drop_remainder",")","return","d","return","input_fn"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L663-L712"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/run_classifier.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, label_list, max_seq_length,\n tokenizer)","argument_list":"","return_statement":"return features","docstring":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_summary":"Convert a set of `InputExample`s to a list of `InputFeatures`.","docstring_tokens":["Convert","a","set","of","InputExample","s","to","a","list","of","InputFeatures","."],"function":"def convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 
10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",")",":","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","tf",".","logging",".","info","(","\"Writing example %d of %d\"","%","(","ex_index",",","len","(","examples",")",")",")","feature","=","convert_single_example","(","ex_index",",","example",",","label_list",",","max_seq_length",",","tokenizer",")","features",".","append","(","feature",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/run_classifier.py#L717-L730"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = LAMBOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
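create_optimizer above combines linear warmup with a linear (power=1.0) polynomial decay to zero. A small pure-Python sketch of the effective learning rate at a given global step (the helper name is hypothetical):

def learning_rate_at(step, init_lr, num_train_steps, num_warmup_steps):
    # Warmup phase: lr = global_step / num_warmup_steps * init_lr.
    if num_warmup_steps and step < num_warmup_steps:
        return init_lr * step / num_warmup_steps
    # Decay phase: tf.train.polynomial_decay with power=1.0, end_learning_rate=0.0.
    step = min(step, num_train_steps)
    return init_lr * (1.0 - step / num_train_steps)

# e.g. with init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100:
# step 50 -> 2.5e-5 (warming up); step 100 -> 4.5e-5 (decay has already begun,
# since the decay schedule runs from step 0); step 550 -> 2.25e-5.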
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","LAMBOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L25-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n 
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
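The update above is AdamW-style decoupled weight decay: the decay term is added to the Adam update rather than to the loss or gradient. A minimal NumPy sketch of a single parameter step (hypothetical helper; note the optimizer above applies no Adam bias correction, and the sketch follows suit):

import numpy as np

def adamw_step(param, grad, m, v, lr, wd=0.01, b1=0.9, b2=0.999, eps=1e-6):
    m = b1 * m + (1.0 - b1) * grad        # next_m
    v = b2 * v + (1.0 - b2) * grad ** 2   # next_v
    update = m / (np.sqrt(v) + eps)
    update += wd * param                  # decoupled weight decay, not L2-in-loss
    return param - lr * update, m, v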
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L108-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L159-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L169-L174"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"LAMBOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n 
name=\"LAMBOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a LAMBOptimizer.","docstring_summary":"Constructs a LAMBOptimizer.","docstring_tokens":["Constructs","a","LAMBOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"LAMBOptimizer\"):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n super(LAMBOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"LAMBOptimizer\"",")",":","super","(","LAMBOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L195-L211"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"LAMBOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/lamb_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/lamb_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n ############## BELOW ARE THE SPECIFIC PARTS FOR LAMB ##############\n\n # Note: Here are two choices for scaling function \\phi(z)\n # minmax: \\phi(z) = min(max(z, \\gamma_l), \\gamma_u)\n # identity: \\phi(z) = z\n # The authors does not mention what is \\gamma_l and \\gamma_u\n # UPDATE: after asking authors, they provide me the code below.\n # ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(\n # math_ops.greater(g_norm, 0), (w_norm \/ g_norm), 1.0), 1.0)\n\n r1 = tf.sqrt(tf.reduce_sum(tf.square(param)))\n r2 = tf.sqrt(tf.reduce_sum(tf.square(update)))\n\n r = tf.where(tf.greater(r1, 0.0),\n tf.where(tf.greater(r2, 0.0),\n r1 \/ r2,\n 1.0),\n 1.0)\n\n eta = self.learning_rate * r\n\n update_with_lr = eta * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/lamb_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/lamb_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
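LAMB layers a per-parameter trust ratio on top of that Adam update: the step is rescaled by ||w|| / ||update||, falling back to 1.0 when either norm is zero. A NumPy sketch of one step (hypothetical helper, same no-bias-correction caveat as above):

import numpy as np

def lamb_step(param, grad, m, v, lr, wd=0.01, b1=0.9, b2=0.999, eps=1e-6):
    m = b1 * m + (1.0 - b1) * grad
    v = b2 * v + (1.0 - b2) * grad ** 2
    update = m / (np.sqrt(v) + eps) + wd * param
    r1 = np.sqrt((param ** 2).sum())            # weight norm
    r2 = np.sqrt((update ** 2).sum())           # update norm
    r = r1 / r2 if r1 > 0 and r2 > 0 else 1.0   # trust ratio
    return param - (lr * r) * update, m, v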
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","############## BELOW ARE THE SPECIFIC PARTS FOR LAMB ##############","# Note: Here are two choices for scaling function \\phi(z)","# minmax: \\phi(z) = min(max(z, \\gamma_l), \\gamma_u)","# identity: \\phi(z) = z","# The authors does not mention what is \\gamma_l and \\gamma_u","# UPDATE: after asking authors, they provide me the code below.","# ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(","# math_ops.greater(g_norm, 0), (w_norm \/ g_norm), 1.0), 1.0)","r1","=","tf",".","sqrt","(","tf",".","reduce_sum","(","tf",".","square","(","param",")",")",")","r2","=","tf",".","sqrt","(","tf",".","reduce_sum","(","tf",".","square","(","update",")",")",")","r","=","tf",".","where","(","tf",".","greater","(","r1",",","0.0",")",",","tf",".","where","(","tf",".","greater","(","r2",",","0.0",")",",","r1","\/","r2",",","1.0",")",",","1.0",")","eta","=","self",".","learning_rate","*","r","update_with_lr","=","eta","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L213-L283"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"LAMBOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L285-L293"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization.py","language":"python","identifier":"LAMBOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor 
name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization.py#L295-L300"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization_finetuning.py","language":"python","identifier":"create_optimizer","parameters":"(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu)","argument_list":"","return_statement":"return train_op","docstring":"Creates an optimizer training op.","docstring_summary":"Creates an optimizer training op.","docstring_tokens":["Creates","an","optimizer","training","op","."],"function":"def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step\/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float \/ warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m\/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999, # 0.98 ONLY USED FOR PRETRAIN. MUST CHANGE AT FINE-TUNING 0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n\n if use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op","function_tokens":["def","create_optimizer","(","loss",",","init_lr",",","num_train_steps",",","num_warmup_steps",",","use_tpu",")",":","global_step","=","tf",".","train",".","get_or_create_global_step","(",")","learning_rate","=","tf",".","constant","(","value","=","init_lr",",","shape","=","[","]",",","dtype","=","tf",".","float32",")","# Implements linear decay of the learning rate.","learning_rate","=","tf",".","train",".","polynomial_decay","(","learning_rate",",","global_step",",","num_train_steps",",","end_learning_rate","=","0.0",",","power","=","1.0",",","cycle","=","False",")","# Implements linear warmup. I.e., if global_step < num_warmup_steps, the","# learning rate will be `global_step\/num_warmup_steps * init_lr`.","if","num_warmup_steps",":","global_steps_int","=","tf",".","cast","(","global_step",",","tf",".","int32",")","warmup_steps_int","=","tf",".","constant","(","num_warmup_steps",",","dtype","=","tf",".","int32",")","global_steps_float","=","tf",".","cast","(","global_steps_int",",","tf",".","float32",")","warmup_steps_float","=","tf",".","cast","(","warmup_steps_int",",","tf",".","float32",")","warmup_percent_done","=","global_steps_float","\/","warmup_steps_float","warmup_learning_rate","=","init_lr","*","warmup_percent_done","is_warmup","=","tf",".","cast","(","global_steps_int","<","warmup_steps_int",",","tf",".","float32",")","learning_rate","=","(","(","1.0","-","is_warmup",")","*","learning_rate","+","is_warmup","*","warmup_learning_rate",")","# It is recommended that you use this optimizer for fine tuning, since this","# is how the model was trained (note that the Adam m\/v variables are NOT","# loaded from init_checkpoint.)","optimizer","=","AdamWeightDecayOptimizer","(","learning_rate","=","learning_rate",",","weight_decay_rate","=","0.01",",","beta_1","=","0.9",",","beta_2","=","0.999",",","# 0.98 ONLY USED FOR PRETRAIN. MUST CHANGE AT FINE-TUNING 0.999,","epsilon","=","1e-6",",","exclude_from_weight_decay","=","[","\"LayerNorm\"",",","\"layer_norm\"",",","\"bias\"","]",")","if","use_tpu",":","optimizer","=","tf",".","contrib",".","tpu",".","CrossShardOptimizer","(","optimizer",")","tvars","=","tf",".","trainable_variables","(",")","grads","=","tf",".","gradients","(","loss",",","tvars",")","# This is how the model was pre-trained.","(","grads",",","_",")","=","tf",".","clip_by_global_norm","(","grads",",","clip_norm","=","1.0",")","train_op","=","optimizer",".","apply_gradients","(","zip","(","grads",",","tvars",")",",","global_step","=","global_step",")","# Normally the global step update is done inside of `apply_gradients`.","# However, `AdamWeightDecayOptimizer` doesn't do this. 
But if you use","# a different optimizer, you should probably take this line out.","new_global_step","=","global_step","+","1","train_op","=","tf",".","group","(","train_op",",","[","global_step",".","assign","(","new_global_step",")","]",")","return","train_op"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization_finetuning.py#L25-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization_finetuning.py","language":"python","identifier":"AdamWeightDecayOptimizer.__init__","parameters":"(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\")","argument_list":"","return_statement":"","docstring":"Constructs a AdamWeightDecayOptimizer.","docstring_summary":"Constructs a AdamWeightDecayOptimizer.","docstring_tokens":["Constructs","a","AdamWeightDecayOptimizer","."],"function":"def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay","function_tokens":["def","__init__","(","self",",","learning_rate",",","weight_decay_rate","=","0.0",",","beta_1","=","0.9",",","beta_2","=","0.999",",","epsilon","=","1e-6",",","exclude_from_weight_decay","=","None",",","name","=","\"AdamWeightDecayOptimizer\"",")",":","super","(","AdamWeightDecayOptimizer",",","self",")",".","__init__","(","False",",","name",")","self",".","learning_rate","=","learning_rate","self",".","weight_decay_rate","=","weight_decay_rate","self",".","beta_1","=","beta_1","self",".","beta_2","=","beta_2","self",".","epsilon","=","epsilon","self",".","exclude_from_weight_decay","=","exclude_from_weight_decay"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization_finetuning.py#L90-L106"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization_finetuning.py","language":"python","identifier":"AdamWeightDecayOptimizer.apply_gradients","parameters":"(self, grads_and_vars, global_step=None, name=None)","argument_list":"","return_statement":"return tf.group(*assignments, name=name)","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"\/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"\/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - 
self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m \/ (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)","function_tokens":["def","apply_gradients","(","self",",","grads_and_vars",",","global_step","=","None",",","name","=","None",")",":","assignments","=","[","]","for","(","grad",",","param",")","in","grads_and_vars",":","if","grad","is","None","or","param","is","None",":","continue","param_name","=","self",".","_get_variable_name","(","param",".","name",")","m","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_m\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","v","=","tf",".","get_variable","(","name","=","param_name","+","\"\/adam_v\"",",","shape","=","param",".","shape",".","as_list","(",")",",","dtype","=","tf",".","float32",",","trainable","=","False",",","initializer","=","tf",".","zeros_initializer","(",")",")","# Standard Adam update.","next_m","=","(","tf",".","multiply","(","self",".","beta_1",",","m",")","+","tf",".","multiply","(","1.0","-","self",".","beta_1",",","grad",")",")","next_v","=","(","tf",".","multiply","(","self",".","beta_2",",","v",")","+","tf",".","multiply","(","1.0","-","self",".","beta_2",",","tf",".","square","(","grad",")",")",")","update","=","next_m","\/","(","tf",".","sqrt","(","next_v",")","+","self",".","epsilon",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
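_do_use_weight_decay above decides per parameter whether decay applies, by regex-searching the cleaned variable name against exclude_from_weight_decay (["LayerNorm", "layer_norm", "bias"] in these optimizers). A small standalone sketch of the same check (the function name is hypothetical):

import re

def use_weight_decay(param_name, weight_decay_rate,
                     exclude=("LayerNorm", "layer_norm", "bias")):
    if not weight_decay_rate:
        return False
    return not any(re.search(pattern, param_name) for pattern in exclude)

# use_weight_decay("bert/encoder/layer_0/attention/LayerNorm/gamma", 0.01) -> False
# use_weight_decay("bert/encoder/layer_0/attention/self/query/kernel", 0.01) -> True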
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","self",".","_do_use_weight_decay","(","param_name",")",":","update","+=","self",".","weight_decay_rate","*","param","update_with_lr","=","self",".","learning_rate","*","update","next_param","=","param","-","update_with_lr","assignments",".","extend","(","[","param",".","assign","(","next_param",")",",","m",".","assign","(","next_m",")",",","v",".","assign","(","next_v",")","]",")","return","tf",".","group","(","*","assignments",",","name","=","name",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization_finetuning.py#L108-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization_finetuning.py","language":"python","identifier":"AdamWeightDecayOptimizer._do_use_weight_decay","parameters":"(self, param_name)","argument_list":"","return_statement":"return True","docstring":"Whether to use L2 weight decay for `param_name`.","docstring_summary":"Whether to use L2 weight decay for `param_name`.","docstring_tokens":["Whether","to","use","L2","weight","decay","for","param_name","."],"function":"def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True","function_tokens":["def","_do_use_weight_decay","(","self",",","param_name",")",":","if","not","self",".","weight_decay_rate",":","return","False","if","self",".","exclude_from_weight_decay",":","for","r","in","self",".","exclude_from_weight_decay",":","if","re",".","search","(","r",",","param_name",")","is","not","None",":","return","False","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization_finetuning.py#L159-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/optimization_finetuning.py","language":"python","identifier":"AdamWeightDecayOptimizer._get_variable_name","parameters":"(self, param_name)","argument_list":"","return_statement":"return param_name","docstring":"Get the variable name from the tensor name.","docstring_summary":"Get the variable name from the tensor name.","docstring_tokens":["Get","the","variable","name","from","the","tensor","name","."],"function":"def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name","function_tokens":["def","_get_variable_name","(","self",",","param_name",")",":","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","param_name",")","if","m","is","not","None",":","param_name","=","m",".","group","(","1",")","return","param_name"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/optimization_finetuning.py#L169-L174"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/create_pretraining_data.py","language":"python","identifier":"write_instance_to_example_files","parameters":"(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, 
output_files)","argument_list":"","return_statement":"","docstring":"Create TF example files from `TrainingInstance`s.","docstring_summary":"Create TF example files from `TrainingInstance`s.","docstring_tokens":["Create","TF","example","files","from","TrainingInstance","s","."],"function":"def write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", 
total_written)","function_tokens":["def","write_instance_to_example_files","(","instances",",","tokenizer",",","max_seq_length",",","max_predictions_per_seq",",","output_files",")",":","writers","=","[","]","for","output_file","in","output_files",":","writers",".","append","(","tf",".","python_io",".","TFRecordWriter","(","output_file",")",")","writer_index","=","0","total_written","=","0","for","(","inst_index",",","instance",")","in","enumerate","(","instances",")",":","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","tokens",")","input_mask","=","[","1","]","*","len","(","input_ids",")","segment_ids","=","list","(","instance",".","segment_ids",")","assert","len","(","input_ids",")","<=","max_seq_length","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","masked_lm_positions","=","list","(","instance",".","masked_lm_positions",")","masked_lm_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","masked_lm_labels",")","masked_lm_weights","=","[","1.0","]","*","len","(","masked_lm_ids",")","while","len","(","masked_lm_positions",")","<","max_predictions_per_seq",":","masked_lm_positions",".","append","(","0",")","masked_lm_ids",".","append","(","0",")","masked_lm_weights",".","append","(","0.0",")","next_sentence_label","=","1","if","instance",".","is_random_next","else","0","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","segment_ids",")","features","[","\"masked_lm_positions\"","]","=","create_int_feature","(","masked_lm_positions",")","features","[","\"masked_lm_ids\"","]","=","create_int_feature","(","masked_lm_ids",")","features","[","\"masked_lm_weights\"","]","=","create_float_feature","(","masked_lm_weights",")","features","[","\"next_sentence_labels\"","]","=","create_int_feature","(","[","next_sentence_label","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writers","[","writer_index","]",".","write","(","tf_example",".","SerializeToString","(",")",")","writer_index","=","(","writer_index","+","1",")","%","len","(","writers",")","total_written","+=","1","if","inst_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","instance",".","tokens","]",")",")","for","feature_name","in","features",".","keys","(",")",":","feature","=","features","[","feature_name","]","values","=","[","]","if","feature",".","int64_list",".","value",":","values","=","feature",".","int64_list",".","value","elif","feature",".","float_list",".","value",":","values","=","feature",".","float_list",".","value","tf",".","logging",".","info","(","\"%s: %s\"","%","(","feature_name",",","\" \"",".","join","(","[","str","(","x",")","for","x","in","values","]",")",")",")","for","writer","in","writers",":","writer",".","close","(",")","tf",".","logging",".","info","(","\"Wrote %d total 
instances\"",",","total_written",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/create_pretraining_data.py#L99-L169"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/create_pretraining_data.py","language":"python","identifier":"create_training_instances","parameters":"(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng)","argument_list":"","return_statement":"return instances","docstring":"Create `TrainingInstance`s from raw text.","docstring_summary":"Create `TrainingInstance`s from raw text.","docstring_tokens":["Create","TrainingInstance","s","from","raw","text","."],"function":"def create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n strings=reader.readline()\n strings=strings.replace(\" \",\" \").replace(\" \",\" \") # \u5982\u679c\u6709\u4e24\u4e2a\u6216\u4e09\u4e2a\u7a7a\u683c\uff0c\u66ff\u6362\u4e3a\u4e00\u4e2a\u7a7a\u683c\n line = tokenization.convert_to_unicode(strings)\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document_albert( # change to albert style for sentence order prediction(SOP), 2019-08-28, brightmart\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng))\n\n rng.shuffle(instances)\n return instances","function_tokens":["def","create_training_instances","(","input_files",",","tokenizer",",","max_seq_length",",","dupe_factor",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","rng",")",":","all_documents","=","[","[","]","]","# Input file format:","# (1) One sentence per line. These should ideally be actual sentences, not","# entire paragraphs or arbitrary spans of text. (Because we use the","# sentence boundaries for the \"next sentence prediction\" task).","# (2) Blank lines between documents. 
Document boundaries are needed so","# that the \"next sentence prediction\" task doesn't span between documents.","for","input_file","in","input_files",":","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","strings","=","reader",".","readline","(",")","strings","=","strings",".","replace","(","\" \"",",","\" \"",")",".","replace","(","\" \"",",","\" \"",")","# \u5982\u679c\u6709\u4e24\u4e2a\u6216\u4e09\u4e2a\u7a7a\u683c\uff0c\u66ff\u6362\u4e3a\u4e00\u4e2a\u7a7a\u683c","line","=","tokenization",".","convert_to_unicode","(","strings",")","if","not","line",":","break","line","=","line",".","strip","(",")","# Empty lines are used as document delimiters","if","not","line",":","all_documents",".","append","(","[","]",")","tokens","=","tokenizer",".","tokenize","(","line",")","if","tokens",":","all_documents","[","-","1","]",".","append","(","tokens",")","# Remove empty documents","all_documents","=","[","x","for","x","in","all_documents","if","x","]","rng",".","shuffle","(","all_documents",")","vocab_words","=","list","(","tokenizer",".","vocab",".","keys","(",")",")","instances","=","[","]","for","_","in","range","(","dupe_factor",")",":","for","document_index","in","range","(","len","(","all_documents",")",")",":","instances",".","extend","(","create_instances_from_document_albert","(","# change to albert style for sentence order prediction(SOP), 2019-08-28, brightmart","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",")","rng",".","shuffle","(","instances",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/create_pretraining_data.py#L182-L225"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/create_pretraining_data.py","language":"python","identifier":"get_new_segment","parameters":"(segment)","argument_list":"","return_statement":"return new_segment","docstring":"\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd. e.g. ['\u60ac', '\u7078', '\u6280', '\u672f', '\u57f9', '\u8bad', '\u4e13', '\u5bb6', '\u6559', '\u4f60', '\u827e', '\u7078', '\u964d', '\u8840', '\u7cd6', '\uff0c', '\u4e3a', '\u7238', '\u5988', '\u6536', '\u597d', '\u4e86', '\uff01']\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd e.g. 
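Editor's note: the `create_training_instances` record above expects a corpus with one sentence per line and blank lines separating documents, and then processes every document `dupe_factor` times so that different random masks and pairings are drawn on each pass. A dependency-free sketch of just the document-reading convention follows; `read_documents` and the whitespace split are illustrative stand-ins for the record's `tokenizer.tokenize`.

```python
# Minimal sketch of the corpus format: one sentence per line,
# blank lines delimit documents, empty documents are dropped.
def read_documents(lines):
    all_documents = [[]]
    for raw in lines:
        line = raw.strip()
        if not line:                  # blank line => start a new document
            all_documents.append([])
        else:
            all_documents[-1].append(line.split())  # stand-in for tokenizer.tokenize
    return [d for d in all_documents if d]          # remove empty documents

docs = read_documents(["Sentence one .", "Sentence two .", "", "Second doc ."])
assert docs == [[["Sentence", "one", "."], ["Sentence", "two", "."]],
                [["Second", "doc", "."]]]
# create_training_instances then loops `for _ in range(dupe_factor)` over all
# documents, so each document yields several differently-masked instances.
```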
['\u60ac', '##\u7078', '\u6280', '\u672f', '\u57f9', '\u8bad', '\u4e13', '##\u5bb6', '\u6559', '\u4f60', '\u827e', '##\u7078', '\u964d', '##\u8840', '##\u7cd6', '\uff0c', '\u4e3a', '\u7238', '##\u5988', '\u6536', '##\u597d', '\u4e86', '\uff01']","docstring_summary":"\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd. e.g. ['\u60ac', '\u7078', '\u6280', '\u672f', '\u57f9', '\u8bad', '\u4e13', '\u5bb6', '\u6559', '\u4f60', '\u827e', '\u7078', '\u964d', '\u8840', '\u7cd6', '\uff0c', '\u4e3a', '\u7238', '\u5988', '\u6536', '\u597d', '\u4e86', '\uff01']\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd e.g. ['\u60ac', '##\u7078', '\u6280', '\u672f', '\u57f9', '\u8bad', '\u4e13', '##\u5bb6', '\u6559', '\u4f60', '\u827e', '##\u7078', '\u964d', '##\u8840', '##\u7cd6', '\uff0c', '\u4e3a', '\u7238', '##\u5988', '\u6536', '##\u597d', '\u4e86', '\uff01']","docstring_tokens":["\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd",":","\u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0","(","#",")","\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002",":","param","segment",":","\u4e00\u53e5\u8bdd",".","e",".","g",".","[","\u60ac","\u7078","\u6280","\u672f","\u57f9","\u8bad","\u4e13","\u5bb6","\u6559","\u4f60","\u827e","\u7078","\u964d","\u8840","\u7cd6","\uff0c","\u4e3a","\u7238","\u5988","\u6536","\u597d","\u4e86","\uff01","]",":","return",":","\u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd","e",".","g",".","[","\u60ac","##\u7078","\u6280","\u672f","\u57f9","\u8bad","\u4e13","##\u5bb6","\u6559","\u4f60","\u827e","##\u7078","\u964d","##\u8840","##\u7cd6","\uff0c","\u4e3a","\u7238","##\u5988","\u6536","##\u597d","\u4e86","\uff01","]"],"function":"def get_new_segment(segment): # \u65b0\u589e\u7684\u65b9\u6cd5 ####\n \"\"\"\n \u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd. e.g. ['\u60ac', '\u7078', '\u6280', '\u672f', '\u57f9', '\u8bad', '\u4e13', '\u5bb6', '\u6559', '\u4f60', '\u827e', '\u7078', '\u964d', '\u8840', '\u7cd6', '\uff0c', '\u4e3a', '\u7238', '\u5988', '\u6536', '\u597d', '\u4e86', '\uff01']\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd e.g. 
['\u60ac', '##\u7078', '\u6280', '\u672f', '\u57f9', '\u8bad', '\u4e13', '##\u5bb6', '\u6559', '\u4f60', '\u827e', '##\u7078', '\u964d', '##\u8840', '##\u7cd6', '\uff0c', '\u4e3a', '\u7238', '##\u5988', '\u6536', '##\u597d', '\u4e86', '\uff01']\n \"\"\"\n seq_cws = jieba.lcut(\"\".join(segment)) # \u5206\u8bcd\n seq_cws_dict = {x: 1 for x in seq_cws} # \u5206\u8bcd\u540e\u7684\u8bcd\u52a0\u5165\u5230\u8bcd\u5178dict\n new_segment = []\n i = 0\n while i < len(segment): # \u4ece\u53e5\u5b50\u7684\u7b2c\u4e00\u4e2a\u5b57\u5f00\u59cb\u5904\u7406\uff0c\u77e5\u9053\u5904\u7406\u5b8c\u6574\u4e2a\u53e5\u5b50\n if len(re.findall('[\\u4E00-\\u9FA5]', segment[i])) == 0: # \u5982\u679c\u627e\u4e0d\u5230\u4e2d\u6587\u7684\uff0c\u539f\u6587\u52a0\u8fdb\u53bb\u5373\u4e0d\u7528\u7279\u6b8a\u5904\u7406\u3002\n new_segment.append(segment[i])\n i += 1\n continue\n\n has_add = False\n for length in range(3, 0, -1):\n if i + length > len(segment):\n continue\n if ''.join(segment[i:i + length]) in seq_cws_dict:\n new_segment.append(segment[i])\n for l in range(1, length):\n new_segment.append('##' + segment[i + l])\n i += length\n has_add = True\n break\n if not has_add:\n new_segment.append(segment[i])\n i += 1\n # print(\"get_new_segment.wwm.get_new_segment:\",new_segment)\n return new_segment","function_tokens":["def","get_new_segment","(","segment",")",":","# \u65b0\u589e\u7684\u65b9\u6cd5 ####","seq_cws","=","jieba",".","lcut","(","\"\"",".","join","(","segment",")",")","# \u5206\u8bcd","seq_cws_dict","=","{","x",":","1","for","x","in","seq_cws","}","# \u5206\u8bcd\u540e\u7684\u8bcd\u52a0\u5165\u5230\u8bcd\u5178dict","new_segment","=","[","]","i","=","0","while","i","<","len","(","segment",")",":","# \u4ece\u53e5\u5b50\u7684\u7b2c\u4e00\u4e2a\u5b57\u5f00\u59cb\u5904\u7406\uff0c\u77e5\u9053\u5904\u7406\u5b8c\u6574\u4e2a\u53e5\u5b50","if","len","(","re",".","findall","(","'[\\u4E00-\\u9FA5]'",",","segment","[","i","]",")",")","==","0",":","# \u5982\u679c\u627e\u4e0d\u5230\u4e2d\u6587\u7684\uff0c\u539f\u6587\u52a0\u8fdb\u53bb\u5373\u4e0d\u7528\u7279\u6b8a\u5904\u7406\u3002","new_segment",".","append","(","segment","[","i","]",")","i","+=","1","continue","has_add","=","False","for","length","in","range","(","3",",","0",",","-","1",")",":","if","i","+","length",">","len","(","segment",")",":","continue","if","''",".","join","(","segment","[","i",":","i","+","length","]",")","in","seq_cws_dict",":","new_segment",".","append","(","segment","[","i","]",")","for","l","in","range","(","1",",","length",")",":","new_segment",".","append","(","'##'","+","segment","[","i","+","l","]",")","i","+=","length","has_add","=","True","break","if","not","has_add",":","new_segment",".","append","(","segment","[","i","]",")","i","+=","1","# print(\"get_new_segment.wwm.get_new_segment:\",new_segment)","return","new_segment"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/create_pretraining_data.py#L227-L258"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document_albert","parameters":"(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.\n This method is changed to create sentence-order prediction (SOP) followed by 
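Editor's note: the `get_new_segment` record above is the Chinese whole-word-mask preparation step: it re-joins the characters, segments them with jieba, then greedily matches character n-grams (longest first, up to length 3) against the segmented words and prefixes every non-initial character of a matched word with `##`. Below is a self-contained sketch of that marking step; the jieba word set is passed in so the example runs without the jieba dependency, and `mark_whole_words` is an illustrative name.

```python
# Re-implementation sketch of get_new_segment's greedy '##' marking.
import re

def mark_whole_words(chars, word_set, max_len=3):
    """Prefix non-initial characters of a known word with '##'."""
    out, i = [], 0
    while i < len(chars):
        if not re.findall(r'[\u4E00-\u9FA5]', chars[i]):   # non-Chinese: copy as-is
            out.append(chars[i])
            i += 1
            continue
        for length in range(max_len, 0, -1):               # greedy, longest first
            if i + length <= len(chars) and ''.join(chars[i:i + length]) in word_set:
                out.append(chars[i])                       # word-initial char kept bare
                out.extend('##' + c for c in chars[i + 1:i + length])
                i += length
                break
        else:                                              # no segmented word matched
            out.append(chars[i])
            i += 1
    return out

print(mark_whole_words(['艾', '灸', '降', '血', '糖'], {'艾灸', '降血糖'}))
# ['艾', '##灸', '降', '##血', '##糖']  -- matches the record's docstring example
```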
idea from paper of ALBERT, 2019-08-28, brightmart","docstring_summary":"Creates `TrainingInstance`s for a single document.\n This method is changed to create sentence-order prediction (SOP) followed by idea from paper of ALBERT, 2019-08-28, brightmart","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document",".","This","method","is","changed","to","create","sentence","-","order","prediction","(","SOP",")","followed","by","idea","from","paper","of","ALBERT","2019","-","08","-","28","brightmart"],"function":"def create_instances_from_document_albert(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\n This method is changed to create sentence-order prediction (SOP) followed by idea from paper of ALBERT, 2019-08-28, brightmart\n \"\"\"\n document = all_documents[document_index] # \u5f97\u5230\u4e00\u4e2a\u6587\u6863\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob: # \u6709\u4e00\u5b9a\u7684\u6bd4\u4f8b\uff0c\u598210%\u7684\u6982\u7387\uff0c\u6211\u4eec\u4f7f\u7528\u6bd4\u8f83\u77ed\u7684\u5e8f\u5217\u957f\u5ea6\uff0c\u4ee5\u7f13\u89e3\u9884\u8bad\u7ec3\u7684\u957f\u5e8f\u5217\u548c\u8c03\u4f18\u9636\u6bb5\uff08\u53ef\u80fd\u7684\uff09\u77ed\u5e8f\u5217\u7684\u4e0d\u4e00\u81f4\u60c5\u51b5\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n # \u8bbe\u6cd5\u4f7f\u7528\u5b9e\u9645\u7684\u53e5\u5b50\uff0c\u800c\u4e0d\u662f\u4efb\u610f\u7684\u622a\u65ad\u53e5\u5b50\uff0c\u4ece\u800c\u66f4\u597d\u7684\u6784\u9020\u53e5\u5b50\u8fde\u8d2f\u6027\u9884\u6d4b\u7684\u4efb\u52a1\n instances = []\n current_chunk = [] # \u5f53\u524d\u5904\u7406\u7684\u6587\u672c\u6bb5\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\n current_length = 0\n i = 0\n # print(\"###document:\",document) # \u4e00\u4e2adocument\u53ef\u4ee5\u662f\u4e00\u6574\u7bc7\u6587\u7ae0\u3001\u65b0\u95fb\u3001\u8bcd\u6761\u7b49. 
document:[['\u662f', '\u7237', '\u4eec', '\uff0c', '\u5c31', '\u5f97', '\u7ed9', '\u5ab3', '\u5987', '\u5e78', '\u798f'], ['\u5173', '\u6ce8', '\u3010', '\u6668', '\u66e6', '\u6559', '\u80b2', '\u3011', '\uff0c', '\u83b7', '\u53d6', '\u80b2', '\u513f', '\u7684', '\u667a', '\u6167', '\uff0c', '\u4e0e', '\u5b69', '\u5b50', '\u4e00', '\u540c', '\u6210', '\u957f', '\uff01'], ['\u65b9', '\u6cd5', ':', '\u6253', '\u5f00', '\u5fae', '\u4fe1', '\u2192', '\u6dfb', '\u52a0', '\u670b', '\u53cb', '\u2192', '\u641c', '\u53f7', '\u2192', '##he', '##bc', '##x', '##jy', '##\u2192', '\u5173', '\u6ce8', '!', '\u6211', '\u662f', '\u4e00', '\u4e2a', '\u7237', '\u4eec', '\uff0c', '\u5b5d', '\u987a', '\u662f', '\u505a', '\u4eba', '\u7684', '\u7b2c', '\u4e00', '\u51c6', '\u5219', '\u3002'], ['\u752d', '\u7ba1', '\u5c0f', '\u65f6', '\u5019', '\u600e', '\u4e48', '\u8ddf', '\u5bb6', '\u957f', '\u72af', '\u6df7', '\u86cb', '\uff0c', '\u957f', '\u5927', '\u4e86', '\uff0c', '\u5c31', '\u5e95', '\u62a5', '\u7b54', '\u7236', '\u6bcd', '\uff0c', '\u4ee5', '\u540e', '\u6211', '\u5ab3', '\u5987', '\u4e5f', '\u5fc5', '\u987b', '\u5b5d', '\u987a', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u4e2a', '\u7237', '\u4eec', '\uff0c', '\u53ef', '\u4ee5', '\u82b1', '\u5fc3', '\uff0c', '\u53ef', '\u4ee5', '\u597d', '\u73a9', '\u3002'], ['\u4f46', '\u6211', '\u4e00', '\u5b9a', '\u4f1a', '\u627e', '\u4e00', '\u4e2a', '\u7ba1', '\u7684', '\u4f4f', '\u6211', '\u7684', '\u5973', '\u4eba', '\uff0c', '\u548c', '\u6211', '\u4e00', '\u8d77', '\u751f', '\u6d3b', '\u3002'], ['28', '\u5c81', '\u4ee5', '\u524d', '\u5728', '\u600e', '\u4e48', '\u73a9', '\u90fd', '\u884c', '\uff0c', '\u4f46', '\u6211', '\u6700', '\u540e', '\u4e00', '\u5b9a', '\u4f1a', '\u627e', '\u4e00', '\u4e2a', '\u52e4', '\u4fed', '\u6301', '\u5bb6', '\u7684', '\u5973', '\u4eba', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4e0d', '\u4f1a', '\u8ba9', '\u81ea', '\u5df1', '\u7684', '\u5973', '\u4eba', '\u53d7', '\u4e00', '\u70b9', '\u59d4', '\u5c48', '\uff0c', '\u6bcf', '\u6b21', '\u628a', '\u5979', '\u62b1', '\u5728', '\u6000', '\u91cc', '\uff0c', '\u770b', '\u5979', '\u6d0b', '\u6ea2', '\u7740', '\u5e78', '\u798f', '\u7684', '\u8138', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u5f15', '\u4ee5', '\u4e3a', '\u50b2', '\uff0c', '\u8fd9', '\u7279', '\u4e48', '\u5c31', '\u662f', '\u6211', '\u7684', '\u5973', '\u4eba', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5e72', '\u4ec0', '\u4e48', '\u4e5f', '\u4e0d', '\u80fd', '\u5fd8', '\u4e86', '\u81ea', '\u5df1', '\u5ab3', '\u5987', '\uff0c', '\u5c31', '\u7b97', '\u548c', '\u54e5', '\u4eec', '\u4e00', '\u8d77', '\u559d', '\u9152', '\uff0c', '\u559d', '\u5230', '\u5f88', '\u665a', '\uff0c', '\u4e5f', '\u8981', '\u63d0', '\u524d', '\u6253', '\u7535', '\u8bdd', '\u544a', '\u8bc9', '\u5979', '\uff0c', '\u8ba9', '\u5979', '\u65e9', '\u70b9', '\u4f11', '\u606f', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u5ab3', '\u5987', '\u7edd', '\u5bf9', '\u4e0d', '\u80fd', '\u62bd', '\u70df', '\uff0c', '\u559d', '\u9152', '\u8fd8', '\u52c9', '\u5f3a', '\u8fc7', '\u5f97', '\u53bb', '\uff0c', '\u4e0d', '\u8fc7', '\u8be5', '\u559d', '\u7684', '\u65f6', '\u5019', '\u559d', '\uff0c', '\u4e0d', '\u8be5', '\u559d', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u5c11', '\u626f', '\u7eb3', '\u6781', '\u8584', '\u86cb', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u5ab3', '\u5987', '\u5fc5', '\u987b', '\u542c', '\u6211', 
'\u8bdd', '\uff0c', '\u5728', '\u4eba', '\u524d', '\u4e00', '\u5b9a', '\u8981', '\u7ed9', '\u6211', '\u9762', '\u5b50', '\uff0c', '\u56de', '\u5bb6', '\u4e86', '\u54b1', '\u4ec0', '\u4e48', '\u90fd', '\u597d', '\u8bf4', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5c31', '\u7b97', '\u96be', '\u7684', '\u5403', '\u4e0d', '\u4e0a', '\u996d', '\u4e86', '\uff0c', '\u90fd', '\u4e0d', '\u5f20', '\u53e3', '\u8ddf', '\u5ab3', '\u5987', '\u8981', '\u4e00', '\u5206', '\u94b1', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0d', '\u7ba1', '\u4e0a', '\u5b66', '\u8fd8', '\u662f', '\u4e0a', '\u73ed', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u9001', '\u5ab3', '\u5987', '\u56de', '\u5bb6', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4ea4', '\u5f80', '\u4e0d', '\u5230', '1', '\u5e74', '\uff0c', '\u7edd', '\u5bf9', '\u4e0d', '\u4f1a', '\u548c', '\u5ab3', '\u5987', '\u63d0', '\u8fc7', '\u5206', '\u7684', '\u8981', '\u6c42', '\uff0c', '\u6211', '\u4f1a', '\u5c0a', '\u91cd', '\u5979', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6e38', '\u620f', '\u6c38', '\u8fdc', '\u6bd4', '\u4e0d', '\u4e0a', '\u6211', '\u5ab3', '\u5987', '\u91cd', '\u8981', '\uff0c', '\u53ea', '\u8981', '\u5ab3', '\u5987', '\u53d1', '\u8bdd', '\uff0c', '\u6211', '\u7edd', '\u5bf9', '\u552f', '\u547d', '\u662f', '\u4ece', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0a', 'q', '\u7edd', '\u5bf9', '\u662f', '\u4e3a', '\u4e86', '\u7b49', '\u5ab3', '\u5987', '\uff0c', '\u6240', '\u6709', '\u66a7', '\u6627', '\u7684', '\u5fc3', '\u60c5', '\u53ea', '\u4e3a', '\u5979', '\u4e00', '\u4e2a', '\u5973', '\u4eba', '\u800c', '\u5199', '\uff0c', '\u6211', '\u4e0d', '\u4e00', '\u5b9a', '\u4f1a', '\u7ecf', '\u5e38', '\u5199', '\u65e5', '\u5fd7', '\uff0c', '\u53ef', '\u662f', '\u6211', '\u4f1a', '\u544a', '\u8bc9', '\u5168', '\u4e16', '\u754c', '\uff0c', '\u6211', '\u5f88', '\u7231', '\u5979', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0d', '\u4e00', '\u5b9a', '\u8981', '\u7ecf', '\u5e38', '\u5236', '\u9020', '\u6d6a', '\u6f2b', '\u3001', '\u5076', '\u5c14', '\u8fc7', '\u4e2a', '\u8282', '\u65e5', '\u4e5f', '\u8981', '\u9001', '\u675f', '\u73ab', '\u7470', '\u82b1', '\u7ed9', '\u5ab3', '\u5987', '\u62b1', '\u56de', '\u5bb6', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u624b', '\u673a', '\u4f1a', '24', '\u5c0f', '\u65f6', '\u4e3a', '\u5979', '\u5f00', '\u673a', '\uff0c', '\u8ba9', '\u5979', '\u534a', '\u591c', '\u75db', '\u7ecf', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u505a', '\u6076', '\u68a6', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u968f', '\u65f6', '\u53ef', '\u4ee5', '\u8054', '\u7cfb', '\u5230', '\u6211', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4f1a', '\u7ecf', '\u5e38', '\u5e26', '\u5ab3', '\u5987', '\u51fa', '\u53bb', '\u73a9', '\uff0c', '\u5979', '\u4e0d', '\u4e00', '\u5b9a', '\u8981', '\u548c', '\u6211', '\u6240', '\u6709', '\u7684', '\u54e5', '\u4eec', '\u90fd', '\u8ba4', '\u8bc6', '\uff0c', '\u4f46', '\u89c1', '\u9762', '\u80fd', '\u8bf4', '\u7684', '\u4e0a', '\u8bdd', '\u5c31', '\u884c', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4f1a', '\u548c', '\u5ab3', '\u5987', '\u7684', '\u59d0', '\u59b9', '\u54e5', '\u4eec', '\u641e', '\u597d', '\u5173', '\u7cfb', '\uff0c', '\u8ba9', '\u5979', '\u4eec', '\u76f8', '\u4fe1', '\u6211', '\u4e00', '\u5b9a', 
'\u53ef', '\u4ee5', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u5e78', '\u798f', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5435', '\u67b6', '\u540e', '\u3001', '\u4e5f', '\u8981', '\u4e3b', '\u52a8', '\u6253', '\u7535', '\u8bdd', '\u5173', '\u5fc3', '\u5979', '\uff0c', '\u54b1', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u7ed9', '\u5ab3', '\u5987', '\u670d', '\u4e2a', '\u8f6f', '\uff0c', '\u9053', '\u4e2a', '\u6b49', '\u600e', '\u4e48', '\u4e86', '\uff1f'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u7edd', '\u5bf9', '\u4e0d', '\u4f1a', '\u5acc', '\u5f03', '\u81ea', '\u5df1', '\u5ab3', '\u5987', '\uff0c', '\u62ff', '\u5979', '\u548c', '\u522b', '\u4eba', '\u6bd4', '\uff0c', '\u8bf4', '\u5979', '\u8fd9', '\u4e0d', '\u5982', '\u4eba', '\u5bb6', '\uff0c', '\u7eb3', '\u4e0d', '\u5982', '\u4eba', '\u5bb6', '\u7684', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u966a', '\u5ab3', '\u5987', '\u901b', '\u8857', '\u65f6', '\uff0c', '\u78b0', '\u89c1', '\u719f', '\u4eba', '\uff0c', '\u65e0', '\u8bba', '\u6211', '\u5ab3', '\u5987', '\u957f', '\u7684', '\u597d', '\u770b', '\u4e0e', '\u5426', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u5927', '\u65b9', '\u7684', '\u4ecb', '\u7ecd', '\u3002'], ['\u8c01', '\u8ba9', '\u54b1', '\u7237', '\u4eec', '\u5c31', '\u597d', '\u8fd9', '\u53e3', '\u5462', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u60f3', '\u6211', '\u4f1a', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u6700', '\u597d', '\u7684', '\u5e78', '\u798f', '\u3002'], ['\u3010', '\u6211', '\u4eec', '\u91cd', '\u5728', '\u5206', '\u4eab', '\u3002'], ['\u6240', '\u6709', '\u6587', '\u5b57', '\u548c', '\u7f8e', '\u56fe', '\uff0c', '\u6765', '\u81ea', '\u7f51', '\u7edc', '\uff0c', '\u6668', '\u6b23', '\u6559', '\u80b2', '\u6574', '\u7406', '\u3002'], ['\u5bf9', '\u539f', '\u6587', '\u4f5c', '\u8005', '\uff0c', '\u8868', '\u793a', '\u656c', '\u610f', '\u3002'], ['\u3011', '\u5173', '\u6ce8', '\u6668', '\u66e6', '\u6559', '\u80b2', '[UNK]', '[UNK]', '\u6668', '\u66e6', '\u6559', '\u80b2', '\uff08', '\u5fae', '\u4fe1', '\u53f7', '\uff1a', 'he', '##bc', '##x', '##jy', '\uff09', '\u3002'], ['\u6253', '\u5f00', '\u5fae', '\u4fe1', '\uff0c', '\u626b', '\u63cf', '\u4e8c', '\u7ef4', '\u7801', '\uff0c', '\u5173', '\u6ce8', '[UNK]', '\u6668', '\u66e6', '\u6559', '\u80b2', '[UNK]', '\uff0c', '\u83b7', '\u53d6', '\u66f4', '\u591a', '\u80b2', '\u513f', '\u8d44', '\u6e90', '\u3002'], ['\u70b9', '\u51fb', '\u4e0b', '\u9762', '\u8ba2', '\u9605', '\u6309', '\u94ae', '\u8ba2', '\u9605', '\uff0c', '\u4f1a', '\u6709', '\u66f4', '\u591a', '\u60ca', '\u559c', '\u54e6', '\uff01']]\n while i < len(document): # \u4ece\u6587\u6863\u7684\u7b2c\u4e00\u4e2a\u4f4d\u7f6e\u5f00\u59cb\uff0c\u6309\u4e2a\u5f80\u4e0b\u770b\n segment = document[i] # segment\u662f\u5217\u8868\uff0c\u4ee3\u8868\u7684\u662f\u6309\u5b57\u5206\u5f00\u7684\u4e00\u4e2a\u5b8c\u6574\u53e5\u5b50\uff0c\u5982 segment=['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u60f3', '\u6211', '\u4f1a', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u6700', '\u597d', '\u7684', '\u5e78', '\u798f', '\u3002']\n if FLAGS.non_chinese==False: # if non chinese is False, that means it is chinese, then do something to make chinese whole word mask works.\n segment = get_new_segment(segment) # whole word mask for chinese: \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole 
mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d\n\n current_chunk.append(segment) # \u5c06\u4e00\u4e2a\u72ec\u7acb\u7684\u53e5\u5b50\u52a0\u5165\u5230\u5f53\u524d\u7684\u6587\u672c\u5757\u4e2d\n current_length += len(segment) # \u7d2f\u8ba1\u5230\u4e3a\u6b62\u4f4d\u7f6e\u63a5\u89e6\u5230\u53e5\u5b50\u7684\u603b\u957f\u5ea6\n if i == len(document) - 1 or current_length >= target_seq_length:\n # \u5982\u679c\u7d2f\u8ba1\u7684\u5e8f\u5217\u957f\u5ea6\u8fbe\u5230\u4e86\u76ee\u6807\u7684\u957f\u5ea6\uff0c\u6216\u5f53\u524d\u8d70\u5230\u4e86\u6587\u6863\u7ed3\u5c3e==>\u6784\u9020\u5e76\u6dfb\u52a0\u5230\u201cA[SEP]B\u201c\u4e2d\u7684A\u548cB\u4e2d\uff1b\n if current_chunk: # \u5982\u679c\u5f53\u524d\u5757\u4e0d\u4e3a\u7a7a\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2: # \u5f53\u524d\u5757\uff0c\u5982\u679c\u5305\u542b\u8d85\u8fc7\u4e24\u4e2a\u53e5\u5b50\uff0c\u53d6\u5f53\u524d\u5757\u7684\u4e00\u90e8\u5206\u4f5c\u4e3a\u201cA[SEP]B\u201c\u4e2d\u7684A\u90e8\u5206\n a_end = rng.randint(1, len(current_chunk) - 1)\n # \u5c06\u5f53\u524d\u6587\u672c\u6bb5\u4e2d\u9009\u53d6\u51fa\u6765\u7684\u524d\u534a\u90e8\u5206\uff0c\u8d4b\u503c\u7ed9A\u5373tokens_a\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n # \u6784\u9020\u201cA[SEP]B\u201c\u4e2d\u7684B\u90e8\u5206(\u6709\u4e00\u90e8\u5206\u662f\u6b63\u5e38\u7684\u5f53\u524d\u6587\u6863\u4e2d\u7684\u540e\u534a\u90e8;\u5728\u539fBERT\u7684\u5b9e\u73b0\u4e2d\u4e00\u90e8\u5206\u662f\u968f\u673a\u7684\u4ece\u53e6\u4e00\u4e2a\u6587\u6863\u4e2d\u9009\u53d6\u7684\uff0c\uff09\n tokens_b = []\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n\n # \u6709\u767e\u5206\u4e4b50%\u7684\u6982\u7387\u4ea4\u6362\u4e00\u4e0btokens_a\u548ctokens_b\u7684\u4f4d\u7f6e\n # print(\"tokens_a length1:\",len(tokens_a))\n # print(\"tokens_b length1:\",len(tokens_b)) # len(tokens_b) = 0\n\n if len(tokens_a)==0 or len(tokens_b)==0: continue\n if rng.random() < 0.5: # \u4ea4\u6362\u4e00\u4e0btokens_a\u548ctokens_b\n is_random_next=True\n temp=tokens_a\n tokens_a=tokens_b\n tokens_b=temp\n else:\n is_random_next=False\n\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n # \u628atokens_a & tokens_b\u52a0\u5165\u5230\u6309\u7167bert\u7684\u98ce\u683c\uff0c\u5373\u4ee5[CLS]tokens_a[SEP]tokens_b[SEP]\u7684\u5f62\u5f0f\uff0c\u7ed3\u5408\u5230\u4e00\u8d77\uff0c\u4f5c\u4e3a\u6700\u7ec8\u7684tokens; \u4e5f\u5e26\u4e0asegment_ids\uff0c\u524d\u9762\u90e8\u5206segment_ids\u7684\u503c\u662f0\uff0c\u540e\u9762\u90e8\u5206\u7684\u503c\u662f1.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n # \u521b\u5efamasked LM\u7684\u4efb\u52a1\u7684\u6570\u636e Creates the predictions for the masked LM objective\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance( # \u521b\u5efa\u8bad\u7ec3\u5b9e\u4f8b\u7684\u5bf9\u8c61\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n 
masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = [] # \u6e05\u7a7a\u5f53\u524d\u5757\n current_length = 0 # \u91cd\u7f6e\u5f53\u524d\u6587\u672c\u5757\u7684\u957f\u5ea6\n i += 1 # \u63a5\u7740\u6587\u6863\u4e2d\u7684\u5185\u5bb9\u5f80\u540e\u770b\n\n return instances","function_tokens":["def","create_instances_from_document_albert","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# \u5f97\u5230\u4e00\u4e2a\u6587\u6863","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","# \u6709\u4e00\u5b9a\u7684\u6bd4\u4f8b\uff0c\u598210%\u7684\u6982\u7387\uff0c\u6211\u4eec\u4f7f\u7528\u6bd4\u8f83\u77ed\u7684\u5e8f\u5217\u957f\u5ea6\uff0c\u4ee5\u7f13\u89e3\u9884\u8bad\u7ec3\u7684\u957f\u5e8f\u5217\u548c\u8c03\u4f18\u9636\u6bb5\uff08\u53ef\u80fd\u7684\uff09\u77ed\u5e8f\u5217\u7684\u4e0d\u4e00\u81f4\u60c5\u51b5","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","# \u8bbe\u6cd5\u4f7f\u7528\u5b9e\u9645\u7684\u53e5\u5b50\uff0c\u800c\u4e0d\u662f\u4efb\u610f\u7684\u622a\u65ad\u53e5\u5b50\uff0c\u4ece\u800c\u66f4\u597d\u7684\u6784\u9020\u53e5\u5b50\u8fde\u8d2f\u6027\u9884\u6d4b\u7684\u4efb\u52a1","instances","=","[","]","current_chunk","=","[","]","# \u5f53\u524d\u5904\u7406\u7684\u6587\u672c\u6bb5\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50","current_length","=","0","i","=","0","# print(\"###document:\",document) # \u4e00\u4e2adocument\u53ef\u4ee5\u662f\u4e00\u6574\u7bc7\u6587\u7ae0\u3001\u65b0\u95fb\u3001\u8bcd\u6761\u7b49. 
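Editor's note: in the `create_instances_from_document_albert` record, both segments of a pair come from the *same* document; with probability 0.5 their order is swapped and `is_random_next` is set accordingly. That swap is what turns BERT's next-sentence prediction into ALBERT-style sentence-order prediction (SOP). A minimal sketch of that pairing decision follows; `make_sop_pair` is an illustrative name.

```python
# Sketch of the ALBERT-style SOP pair construction: same document,
# only the ORDER of A and B may be flipped.
import random

def make_sop_pair(chunk_sentences, rng):
    """chunk_sentences: list of token lists from ONE document, in reading order."""
    a_end = 1 if len(chunk_sentences) < 2 else rng.randint(1, len(chunk_sentences) - 1)
    tokens_a = [t for s in chunk_sentences[:a_end] for t in s]
    tokens_b = [t for s in chunk_sentences[a_end:] for t in s]
    if not tokens_a or not tokens_b:
        return None                        # the record skips such chunks ("continue")
    if rng.random() < 0.5:                 # 50%: swap => label "wrong order"
        tokens_a, tokens_b = tokens_b, tokens_a
        is_random_next = True
    else:
        is_random_next = False             # natural order kept
    return tokens_a, tokens_b, is_random_next

rng = random.Random(12345)
print(make_sop_pair([["我", "是"], ["一", "爷", "们"]], rng))
```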
document:[['\u662f', '\u7237', '\u4eec', '\uff0c', '\u5c31', '\u5f97', '\u7ed9', '\u5ab3', '\u5987', '\u5e78', '\u798f'], ['\u5173', '\u6ce8', '\u3010', '\u6668', '\u66e6', '\u6559', '\u80b2', '\u3011', '\uff0c', '\u83b7', '\u53d6', '\u80b2', '\u513f', '\u7684', '\u667a', '\u6167', '\uff0c', '\u4e0e', '\u5b69', '\u5b50', '\u4e00', '\u540c', '\u6210', '\u957f', '\uff01'], ['\u65b9', '\u6cd5', ':', '\u6253', '\u5f00', '\u5fae', '\u4fe1', '\u2192', '\u6dfb', '\u52a0', '\u670b', '\u53cb', '\u2192', '\u641c', '\u53f7', '\u2192', '##he', '##bc', '##x', '##jy', '##\u2192', '\u5173', '\u6ce8', '!', '\u6211', '\u662f', '\u4e00', '\u4e2a', '\u7237', '\u4eec', '\uff0c', '\u5b5d', '\u987a', '\u662f', '\u505a', '\u4eba', '\u7684', '\u7b2c', '\u4e00', '\u51c6', '\u5219', '\u3002'], ['\u752d', '\u7ba1', '\u5c0f', '\u65f6', '\u5019', '\u600e', '\u4e48', '\u8ddf', '\u5bb6', '\u957f', '\u72af', '\u6df7', '\u86cb', '\uff0c', '\u957f', '\u5927', '\u4e86', '\uff0c', '\u5c31', '\u5e95', '\u62a5', '\u7b54', '\u7236', '\u6bcd', '\uff0c', '\u4ee5', '\u540e', '\u6211', '\u5ab3', '\u5987', '\u4e5f', '\u5fc5', '\u987b', '\u5b5d', '\u987a', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u4e2a', '\u7237', '\u4eec', '\uff0c', '\u53ef', '\u4ee5', '\u82b1', '\u5fc3', '\uff0c', '\u53ef', '\u4ee5', '\u597d', '\u73a9', '\u3002'], ['\u4f46', '\u6211', '\u4e00', '\u5b9a', '\u4f1a', '\u627e', '\u4e00', '\u4e2a', '\u7ba1', '\u7684', '\u4f4f', '\u6211', '\u7684', '\u5973', '\u4eba', '\uff0c', '\u548c', '\u6211', '\u4e00', '\u8d77', '\u751f', '\u6d3b', '\u3002'], ['28', '\u5c81', '\u4ee5', '\u524d', '\u5728', '\u600e', '\u4e48', '\u73a9', '\u90fd', '\u884c', '\uff0c', '\u4f46', '\u6211', '\u6700', '\u540e', '\u4e00', '\u5b9a', '\u4f1a', '\u627e', '\u4e00', '\u4e2a', '\u52e4', '\u4fed', '\u6301', '\u5bb6', '\u7684', '\u5973', '\u4eba', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4e0d', '\u4f1a', '\u8ba9', '\u81ea', '\u5df1', '\u7684', '\u5973', '\u4eba', '\u53d7', '\u4e00', '\u70b9', '\u59d4', '\u5c48', '\uff0c', '\u6bcf', '\u6b21', '\u628a', '\u5979', '\u62b1', '\u5728', '\u6000', '\u91cc', '\uff0c', '\u770b', '\u5979', '\u6d0b', '\u6ea2', '\u7740', '\u5e78', '\u798f', '\u7684', '\u8138', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u5f15', '\u4ee5', '\u4e3a', '\u50b2', '\uff0c', '\u8fd9', '\u7279', '\u4e48', '\u5c31', '\u662f', '\u6211', '\u7684', '\u5973', '\u4eba', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5e72', '\u4ec0', '\u4e48', '\u4e5f', '\u4e0d', '\u80fd', '\u5fd8', '\u4e86', '\u81ea', '\u5df1', '\u5ab3', '\u5987', '\uff0c', '\u5c31', '\u7b97', '\u548c', '\u54e5', '\u4eec', '\u4e00', '\u8d77', '\u559d', '\u9152', '\uff0c', '\u559d', '\u5230', '\u5f88', '\u665a', '\uff0c', '\u4e5f', '\u8981', '\u63d0', '\u524d', '\u6253', '\u7535', '\u8bdd', '\u544a', '\u8bc9', '\u5979', '\uff0c', '\u8ba9', '\u5979', '\u65e9', '\u70b9', '\u4f11', '\u606f', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u5ab3', '\u5987', '\u7edd', '\u5bf9', '\u4e0d', '\u80fd', '\u62bd', '\u70df', '\uff0c', '\u559d', '\u9152', '\u8fd8', '\u52c9', '\u5f3a', '\u8fc7', '\u5f97', '\u53bb', '\uff0c', '\u4e0d', '\u8fc7', '\u8be5', '\u559d', '\u7684', '\u65f6', '\u5019', '\u559d', '\uff0c', '\u4e0d', '\u8be5', '\u559d', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u5c11', '\u626f', '\u7eb3', '\u6781', '\u8584', '\u86cb', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u5ab3', '\u5987', '\u5fc5', '\u987b', '\u542c', '\u6211', 
'\u8bdd', '\uff0c', '\u5728', '\u4eba', '\u524d', '\u4e00', '\u5b9a', '\u8981', '\u7ed9', '\u6211', '\u9762', '\u5b50', '\uff0c', '\u56de', '\u5bb6', '\u4e86', '\u54b1', '\u4ec0', '\u4e48', '\u90fd', '\u597d', '\u8bf4', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5c31', '\u7b97', '\u96be', '\u7684', '\u5403', '\u4e0d', '\u4e0a', '\u996d', '\u4e86', '\uff0c', '\u90fd', '\u4e0d', '\u5f20', '\u53e3', '\u8ddf', '\u5ab3', '\u5987', '\u8981', '\u4e00', '\u5206', '\u94b1', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0d', '\u7ba1', '\u4e0a', '\u5b66', '\u8fd8', '\u662f', '\u4e0a', '\u73ed', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u9001', '\u5ab3', '\u5987', '\u56de', '\u5bb6', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4ea4', '\u5f80', '\u4e0d', '\u5230', '1', '\u5e74', '\uff0c', '\u7edd', '\u5bf9', '\u4e0d', '\u4f1a', '\u548c', '\u5ab3', '\u5987', '\u63d0', '\u8fc7', '\u5206', '\u7684', '\u8981', '\u6c42', '\uff0c', '\u6211', '\u4f1a', '\u5c0a', '\u91cd', '\u5979', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6e38', '\u620f', '\u6c38', '\u8fdc', '\u6bd4', '\u4e0d', '\u4e0a', '\u6211', '\u5ab3', '\u5987', '\u91cd', '\u8981', '\uff0c', '\u53ea', '\u8981', '\u5ab3', '\u5987', '\u53d1', '\u8bdd', '\uff0c', '\u6211', '\u7edd', '\u5bf9', '\u552f', '\u547d', '\u662f', '\u4ece', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0a', 'q', '\u7edd', '\u5bf9', '\u662f', '\u4e3a', '\u4e86', '\u7b49', '\u5ab3', '\u5987', '\uff0c', '\u6240', '\u6709', '\u66a7', '\u6627', '\u7684', '\u5fc3', '\u60c5', '\u53ea', '\u4e3a', '\u5979', '\u4e00', '\u4e2a', '\u5973', '\u4eba', '\u800c', '\u5199', '\uff0c', '\u6211', '\u4e0d', '\u4e00', '\u5b9a', '\u4f1a', '\u7ecf', '\u5e38', '\u5199', '\u65e5', '\u5fd7', '\uff0c', '\u53ef', '\u662f', '\u6211', '\u4f1a', '\u544a', '\u8bc9', '\u5168', '\u4e16', '\u754c', '\uff0c', '\u6211', '\u5f88', '\u7231', '\u5979', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0d', '\u4e00', '\u5b9a', '\u8981', '\u7ecf', '\u5e38', '\u5236', '\u9020', '\u6d6a', '\u6f2b', '\u3001', '\u5076', '\u5c14', '\u8fc7', '\u4e2a', '\u8282', '\u65e5', '\u4e5f', '\u8981', '\u9001', '\u675f', '\u73ab', '\u7470', '\u82b1', '\u7ed9', '\u5ab3', '\u5987', '\u62b1', '\u56de', '\u5bb6', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u624b', '\u673a', '\u4f1a', '24', '\u5c0f', '\u65f6', '\u4e3a', '\u5979', '\u5f00', '\u673a', '\uff0c', '\u8ba9', '\u5979', '\u534a', '\u591c', '\u75db', '\u7ecf', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u505a', '\u6076', '\u68a6', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u968f', '\u65f6', '\u53ef', '\u4ee5', '\u8054', '\u7cfb', '\u5230', '\u6211', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4f1a', '\u7ecf', '\u5e38', '\u5e26', '\u5ab3', '\u5987', '\u51fa', '\u53bb', '\u73a9', '\uff0c', '\u5979', '\u4e0d', '\u4e00', '\u5b9a', '\u8981', '\u548c', '\u6211', '\u6240', '\u6709', '\u7684', '\u54e5', '\u4eec', '\u90fd', '\u8ba4', '\u8bc6', '\uff0c', '\u4f46', '\u89c1', '\u9762', '\u80fd', '\u8bf4', '\u7684', '\u4e0a', '\u8bdd', '\u5c31', '\u884c', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4f1a', '\u548c', '\u5ab3', '\u5987', '\u7684', '\u59d0', '\u59b9', '\u54e5', '\u4eec', '\u641e', '\u597d', '\u5173', '\u7cfb', '\uff0c', '\u8ba9', '\u5979', '\u4eec', '\u76f8', '\u4fe1', '\u6211', '\u4e00', '\u5b9a', 
'\u53ef', '\u4ee5', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u5e78', '\u798f', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5435', '\u67b6', '\u540e', '\u3001', '\u4e5f', '\u8981', '\u4e3b', '\u52a8', '\u6253', '\u7535', '\u8bdd', '\u5173', '\u5fc3', '\u5979', '\uff0c', '\u54b1', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u7ed9', '\u5ab3', '\u5987', '\u670d', '\u4e2a', '\u8f6f', '\uff0c', '\u9053', '\u4e2a', '\u6b49', '\u600e', '\u4e48', '\u4e86', '\uff1f'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u7edd', '\u5bf9', '\u4e0d', '\u4f1a', '\u5acc', '\u5f03', '\u81ea', '\u5df1', '\u5ab3', '\u5987', '\uff0c', '\u62ff', '\u5979', '\u548c', '\u522b', '\u4eba', '\u6bd4', '\uff0c', '\u8bf4', '\u5979', '\u8fd9', '\u4e0d', '\u5982', '\u4eba', '\u5bb6', '\uff0c', '\u7eb3', '\u4e0d', '\u5982', '\u4eba', '\u5bb6', '\u7684', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u966a', '\u5ab3', '\u5987', '\u901b', '\u8857', '\u65f6', '\uff0c', '\u78b0', '\u89c1', '\u719f', '\u4eba', '\uff0c', '\u65e0', '\u8bba', '\u6211', '\u5ab3', '\u5987', '\u957f', '\u7684', '\u597d', '\u770b', '\u4e0e', '\u5426', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u5927', '\u65b9', '\u7684', '\u4ecb', '\u7ecd', '\u3002'], ['\u8c01', '\u8ba9', '\u54b1', '\u7237', '\u4eec', '\u5c31', '\u597d', '\u8fd9', '\u53e3', '\u5462', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u60f3', '\u6211', '\u4f1a', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u6700', '\u597d', '\u7684', '\u5e78', '\u798f', '\u3002'], ['\u3010', '\u6211', '\u4eec', '\u91cd', '\u5728', '\u5206', '\u4eab', '\u3002'], ['\u6240', '\u6709', '\u6587', '\u5b57', '\u548c', '\u7f8e', '\u56fe', '\uff0c', '\u6765', '\u81ea', '\u7f51', '\u7edc', '\uff0c', '\u6668', '\u6b23', '\u6559', '\u80b2', '\u6574', '\u7406', '\u3002'], ['\u5bf9', '\u539f', '\u6587', '\u4f5c', '\u8005', '\uff0c', '\u8868', '\u793a', '\u656c', '\u610f', '\u3002'], ['\u3011', '\u5173', '\u6ce8', '\u6668', '\u66e6', '\u6559', '\u80b2', '[UNK]', '[UNK]', '\u6668', '\u66e6', '\u6559', '\u80b2', '\uff08', '\u5fae', '\u4fe1', '\u53f7', '\uff1a', 'he', '##bc', '##x', '##jy', '\uff09', '\u3002'], ['\u6253', '\u5f00', '\u5fae', '\u4fe1', '\uff0c', '\u626b', '\u63cf', '\u4e8c', '\u7ef4', '\u7801', '\uff0c', '\u5173', '\u6ce8', '[UNK]', '\u6668', '\u66e6', '\u6559', '\u80b2', '[UNK]', '\uff0c', '\u83b7', '\u53d6', '\u66f4', '\u591a', '\u80b2', '\u513f', '\u8d44', '\u6e90', '\u3002'], ['\u70b9', '\u51fb', '\u4e0b', '\u9762', '\u8ba2', '\u9605', '\u6309', '\u94ae', '\u8ba2', '\u9605', '\uff0c', '\u4f1a', '\u6709', '\u66f4', '\u591a', '\u60ca', '\u559c', '\u54e6', '\uff01']]","while","i","<","len","(","document",")",":","# \u4ece\u6587\u6863\u7684\u7b2c\u4e00\u4e2a\u4f4d\u7f6e\u5f00\u59cb\uff0c\u6309\u4e2a\u5f80\u4e0b\u770b","segment","=","document","[","i","]","# segment\u662f\u5217\u8868\uff0c\u4ee3\u8868\u7684\u662f\u6309\u5b57\u5206\u5f00\u7684\u4e00\u4e2a\u5b8c\u6574\u53e5\u5b50\uff0c\u5982 segment=['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u60f3', '\u6211', '\u4f1a', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u6700', '\u597d', '\u7684', '\u5e78', '\u798f', '\u3002']","if","FLAGS",".","non_chinese","==","False",":","# if non chinese is False, that means it is chinese, then do something to make chinese whole word mask works.","segment","=","get_new_segment","(","segment",")","# whole word mask for chinese: \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole 
mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d","current_chunk",".","append","(","segment",")","# \u5c06\u4e00\u4e2a\u72ec\u7acb\u7684\u53e5\u5b50\u52a0\u5165\u5230\u5f53\u524d\u7684\u6587\u672c\u5757\u4e2d","current_length","+=","len","(","segment",")","# \u7d2f\u8ba1\u5230\u4e3a\u6b62\u4f4d\u7f6e\u63a5\u89e6\u5230\u53e5\u5b50\u7684\u603b\u957f\u5ea6","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","# \u5982\u679c\u7d2f\u8ba1\u7684\u5e8f\u5217\u957f\u5ea6\u8fbe\u5230\u4e86\u76ee\u6807\u7684\u957f\u5ea6\uff0c\u6216\u5f53\u524d\u8d70\u5230\u4e86\u6587\u6863\u7ed3\u5c3e==>\u6784\u9020\u5e76\u6dfb\u52a0\u5230\u201cA[SEP]B\u201c\u4e2d\u7684A\u548cB\u4e2d\uff1b","if","current_chunk",":","# \u5982\u679c\u5f53\u524d\u5757\u4e0d\u4e3a\u7a7a","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","# \u5f53\u524d\u5757\uff0c\u5982\u679c\u5305\u542b\u8d85\u8fc7\u4e24\u4e2a\u53e5\u5b50\uff0c\u53d6\u5f53\u524d\u5757\u7684\u4e00\u90e8\u5206\u4f5c\u4e3a\u201cA[SEP]B\u201c\u4e2d\u7684A\u90e8\u5206","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","# \u5c06\u5f53\u524d\u6587\u672c\u6bb5\u4e2d\u9009\u53d6\u51fa\u6765\u7684\u524d\u534a\u90e8\u5206\uff0c\u8d4b\u503c\u7ed9A\u5373tokens_a","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","# \u6784\u9020\u201cA[SEP]B\u201c\u4e2d\u7684B\u90e8\u5206(\u6709\u4e00\u90e8\u5206\u662f\u6b63\u5e38\u7684\u5f53\u524d\u6587\u6863\u4e2d\u7684\u540e\u534a\u90e8;\u5728\u539fBERT\u7684\u5b9e\u73b0\u4e2d\u4e00\u90e8\u5206\u662f\u968f\u673a\u7684\u4ece\u53e6\u4e00\u4e2a\u6587\u6863\u4e2d\u9009\u53d6\u7684\uff0c\uff09","tokens_b","=","[","]","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","# \u6709\u767e\u5206\u4e4b50%\u7684\u6982\u7387\u4ea4\u6362\u4e00\u4e0btokens_a\u548ctokens_b\u7684\u4f4d\u7f6e","# print(\"tokens_a length1:\",len(tokens_a))","# print(\"tokens_b length1:\",len(tokens_b)) # len(tokens_b) = 0","if","len","(","tokens_a",")","==","0","or","len","(","tokens_b",")","==","0",":","continue","if","rng",".","random","(",")","<","0.5",":","# \u4ea4\u6362\u4e00\u4e0btokens_a\u548ctokens_b","is_random_next","=","True","temp","=","tokens_a","tokens_a","=","tokens_b","tokens_b","=","temp","else",":","is_random_next","=","False","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","# \u628atokens_a & tokens_b\u52a0\u5165\u5230\u6309\u7167bert\u7684\u98ce\u683c\uff0c\u5373\u4ee5[CLS]tokens_a[SEP]tokens_b[SEP]\u7684\u5f62\u5f0f\uff0c\u7ed3\u5408\u5230\u4e00\u8d77\uff0c\u4f5c\u4e3a\u6700\u7ec8\u7684tokens; 
\u4e5f\u5e26\u4e0asegment_ids\uff0c\u524d\u9762\u90e8\u5206segment_ids\u7684\u503c\u662f0\uff0c\u540e\u9762\u90e8\u5206\u7684\u503c\u662f1.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","# \u521b\u5efamasked LM\u7684\u4efb\u52a1\u7684\u6570\u636e Creates the predictions for the masked LM objective","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","# \u521b\u5efa\u8bad\u7ec3\u5b9e\u4f8b\u7684\u5bf9\u8c61","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","# \u6e05\u7a7a\u5f53\u524d\u5757","current_length","=","0","# \u91cd\u7f6e\u5f53\u524d\u6587\u672c\u5757\u7684\u957f\u5ea6","i","+=","1","# \u63a5\u7740\u6587\u6863\u4e2d\u7684\u5185\u5bb9\u5f80\u540e\u770b","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/create_pretraining_data.py#L260-L369"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/create_pretraining_data.py","language":"python","identifier":"create_instances_from_document_original","parameters":"( # THIS IS ORIGINAL BERT STYLE FOR CREATE DATA OF MLM AND NEXT SENTENCE PREDICTION TASK\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document_original( # THIS IS ORIGINAL BERT STYLE FOR CREATE DATA OF MLM AND NEXT SENTENCE PREDICTION TASK\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index] # \u5f97\u5230\u4e00\u4e2a\u6587\u6863\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. 
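Editor's note: both instance builders in this file assemble the final sequence the same way, `[CLS] A [SEP] B [SEP]`, with `segment_ids` of 0 over `[CLS] A [SEP]` and 1 over `B [SEP]`; this is also why `max_num_tokens = max_seq_length - 3` reserves room for the three special tokens. A tiny sketch of that packing step (`pack_pair` is an illustrative name):

```python
# Packing A and B into BERT's [CLS] A [SEP] B [SEP] layout with segment ids.
def pack_pair(tokens_a, tokens_b):
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    return tokens, segment_ids

tokens, segment_ids = pack_pair(["我", "是"], ["爷", "们"])
assert tokens == ["[CLS]", "我", "是", "[SEP]", "爷", "们", "[SEP]"]
assert segment_ids == [0, 0, 0, 0, 1, 1, 1]
```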
However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob: # \u6709\u4e00\u5b9a\u7684\u6bd4\u4f8b\uff0c\u598210%\u7684\u6982\u7387\uff0c\u6211\u4eec\u4f7f\u7528\u6bd4\u8f83\u77ed\u7684\u5e8f\u5217\u957f\u5ea6\uff0c\u4ee5\u7f13\u89e3\u9884\u8bad\u7ec3\u7684\u957f\u5e8f\u5217\u548c\u8c03\u4f18\u9636\u6bb5\uff08\u53ef\u80fd\u7684\uff09\u77ed\u5e8f\u5217\u7684\u4e0d\u4e00\u81f4\u60c5\u51b5\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n # \u8bbe\u6cd5\u4f7f\u7528\u5b9e\u9645\u7684\u53e5\u5b50\uff0c\u800c\u4e0d\u662f\u4efb\u610f\u7684\u622a\u65ad\u53e5\u5b50\uff0c\u4ece\u800c\u66f4\u597d\u7684\u6784\u9020\u53e5\u5b50\u8fde\u8d2f\u6027\u9884\u6d4b\u7684\u4efb\u52a1\n instances = []\n current_chunk = [] # \u5f53\u524d\u5904\u7406\u7684\u6587\u672c\u6bb5\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\n current_length = 0\n i = 0\n # print(\"###document:\",document) # \u4e00\u4e2adocument\u53ef\u4ee5\u662f\u4e00\u6574\u7bc7\u6587\u7ae0\u3001\u65b0\u95fb\u3001\u4e00\u4e2a\u8bcd\u6761\u7b49. document:[['\u662f', '\u7237', '\u4eec', '\uff0c', '\u5c31', '\u5f97', '\u7ed9', '\u5ab3', '\u5987', '\u5e78', '\u798f'], ['\u5173', '\u6ce8', '\u3010', '\u6668', '\u66e6', '\u6559', '\u80b2', '\u3011', '\uff0c', '\u83b7', '\u53d6', '\u80b2', '\u513f', '\u7684', '\u667a', '\u6167', '\uff0c', '\u4e0e', '\u5b69', '\u5b50', '\u4e00', '\u540c', '\u6210', '\u957f', '\uff01'], ['\u65b9', '\u6cd5', ':', '\u6253', '\u5f00', '\u5fae', '\u4fe1', '\u2192', '\u6dfb', '\u52a0', '\u670b', '\u53cb', '\u2192', '\u641c', '\u53f7', '\u2192', '##he', '##bc', '##x', '##jy', '##\u2192', '\u5173', '\u6ce8', '!', '\u6211', '\u662f', '\u4e00', '\u4e2a', '\u7237', '\u4eec', '\uff0c', '\u5b5d', '\u987a', '\u662f', '\u505a', '\u4eba', '\u7684', '\u7b2c', '\u4e00', '\u51c6', '\u5219', '\u3002'], ['\u752d', '\u7ba1', '\u5c0f', '\u65f6', '\u5019', '\u600e', '\u4e48', '\u8ddf', '\u5bb6', '\u957f', '\u72af', '\u6df7', '\u86cb', '\uff0c', '\u957f', '\u5927', '\u4e86', '\uff0c', '\u5c31', '\u5e95', '\u62a5', '\u7b54', '\u7236', '\u6bcd', '\uff0c', '\u4ee5', '\u540e', '\u6211', '\u5ab3', '\u5987', '\u4e5f', '\u5fc5', '\u987b', '\u5b5d', '\u987a', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u4e2a', '\u7237', '\u4eec', '\uff0c', '\u53ef', '\u4ee5', '\u82b1', '\u5fc3', '\uff0c', '\u53ef', '\u4ee5', '\u597d', '\u73a9', '\u3002'], ['\u4f46', '\u6211', '\u4e00', '\u5b9a', '\u4f1a', '\u627e', '\u4e00', '\u4e2a', '\u7ba1', '\u7684', '\u4f4f', '\u6211', '\u7684', '\u5973', '\u4eba', '\uff0c', '\u548c', '\u6211', '\u4e00', '\u8d77', '\u751f', '\u6d3b', '\u3002'], ['28', '\u5c81', '\u4ee5', '\u524d', '\u5728', '\u600e', '\u4e48', '\u73a9', '\u90fd', '\u884c', '\uff0c', '\u4f46', '\u6211', '\u6700', '\u540e', '\u4e00', '\u5b9a', '\u4f1a', '\u627e', '\u4e00', '\u4e2a', '\u52e4', '\u4fed', '\u6301', '\u5bb6', '\u7684', '\u5973', '\u4eba', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4e0d', '\u4f1a', '\u8ba9', 
'\u81ea', '\u5df1', '\u7684', '\u5973', '\u4eba', '\u53d7', '\u4e00', '\u70b9', '\u59d4', '\u5c48', '\uff0c', '\u6bcf', '\u6b21', '\u628a', '\u5979', '\u62b1', '\u5728', '\u6000', '\u91cc', '\uff0c', '\u770b', '\u5979', '\u6d0b', '\u6ea2', '\u7740', '\u5e78', '\u798f', '\u7684', '\u8138', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u5f15', '\u4ee5', '\u4e3a', '\u50b2', '\uff0c', '\u8fd9', '\u7279', '\u4e48', '\u5c31', '\u662f', '\u6211', '\u7684', '\u5973', '\u4eba', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5e72', '\u4ec0', '\u4e48', '\u4e5f', '\u4e0d', '\u80fd', '\u5fd8', '\u4e86', '\u81ea', '\u5df1', '\u5ab3', '\u5987', '\uff0c', '\u5c31', '\u7b97', '\u548c', '\u54e5', '\u4eec', '\u4e00', '\u8d77', '\u559d', '\u9152', '\uff0c', '\u559d', '\u5230', '\u5f88', '\u665a', '\uff0c', '\u4e5f', '\u8981', '\u63d0', '\u524d', '\u6253', '\u7535', '\u8bdd', '\u544a', '\u8bc9', '\u5979', '\uff0c', '\u8ba9', '\u5979', '\u65e9', '\u70b9', '\u4f11', '\u606f', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u5ab3', '\u5987', '\u7edd', '\u5bf9', '\u4e0d', '\u80fd', '\u62bd', '\u70df', '\uff0c', '\u559d', '\u9152', '\u8fd8', '\u52c9', '\u5f3a', '\u8fc7', '\u5f97', '\u53bb', '\uff0c', '\u4e0d', '\u8fc7', '\u8be5', '\u559d', '\u7684', '\u65f6', '\u5019', '\u559d', '\uff0c', '\u4e0d', '\u8be5', '\u559d', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u5c11', '\u626f', '\u7eb3', '\u6781', '\u8584', '\u86cb', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u5ab3', '\u5987', '\u5fc5', '\u987b', '\u542c', '\u6211', '\u8bdd', '\uff0c', '\u5728', '\u4eba', '\u524d', '\u4e00', '\u5b9a', '\u8981', '\u7ed9', '\u6211', '\u9762', '\u5b50', '\uff0c', '\u56de', '\u5bb6', '\u4e86', '\u54b1', '\u4ec0', '\u4e48', '\u90fd', '\u597d', '\u8bf4', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5c31', '\u7b97', '\u96be', '\u7684', '\u5403', '\u4e0d', '\u4e0a', '\u996d', '\u4e86', '\uff0c', '\u90fd', '\u4e0d', '\u5f20', '\u53e3', '\u8ddf', '\u5ab3', '\u5987', '\u8981', '\u4e00', '\u5206', '\u94b1', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0d', '\u7ba1', '\u4e0a', '\u5b66', '\u8fd8', '\u662f', '\u4e0a', '\u73ed', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u9001', '\u5ab3', '\u5987', '\u56de', '\u5bb6', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4ea4', '\u5f80', '\u4e0d', '\u5230', '1', '\u5e74', '\uff0c', '\u7edd', '\u5bf9', '\u4e0d', '\u4f1a', '\u548c', '\u5ab3', '\u5987', '\u63d0', '\u8fc7', '\u5206', '\u7684', '\u8981', '\u6c42', '\uff0c', '\u6211', '\u4f1a', '\u5c0a', '\u91cd', '\u5979', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6e38', '\u620f', '\u6c38', '\u8fdc', '\u6bd4', '\u4e0d', '\u4e0a', '\u6211', '\u5ab3', '\u5987', '\u91cd', '\u8981', '\uff0c', '\u53ea', '\u8981', '\u5ab3', '\u5987', '\u53d1', '\u8bdd', '\uff0c', '\u6211', '\u7edd', '\u5bf9', '\u552f', '\u547d', '\u662f', '\u4ece', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0a', 'q', '\u7edd', '\u5bf9', '\u662f', '\u4e3a', '\u4e86', '\u7b49', '\u5ab3', '\u5987', '\uff0c', '\u6240', '\u6709', '\u66a7', '\u6627', '\u7684', '\u5fc3', '\u60c5', '\u53ea', '\u4e3a', '\u5979', '\u4e00', '\u4e2a', '\u5973', '\u4eba', '\u800c', '\u5199', '\uff0c', '\u6211', '\u4e0d', '\u4e00', '\u5b9a', '\u4f1a', '\u7ecf', '\u5e38', '\u5199', '\u65e5', '\u5fd7', '\uff0c', '\u53ef', '\u662f', '\u6211', '\u4f1a', '\u544a', '\u8bc9', 
'\u5168', '\u4e16', '\u754c', '\uff0c', '\u6211', '\u5f88', '\u7231', '\u5979', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0d', '\u4e00', '\u5b9a', '\u8981', '\u7ecf', '\u5e38', '\u5236', '\u9020', '\u6d6a', '\u6f2b', '\u3001', '\u5076', '\u5c14', '\u8fc7', '\u4e2a', '\u8282', '\u65e5', '\u4e5f', '\u8981', '\u9001', '\u675f', '\u73ab', '\u7470', '\u82b1', '\u7ed9', '\u5ab3', '\u5987', '\u62b1', '\u56de', '\u5bb6', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u624b', '\u673a', '\u4f1a', '24', '\u5c0f', '\u65f6', '\u4e3a', '\u5979', '\u5f00', '\u673a', '\uff0c', '\u8ba9', '\u5979', '\u534a', '\u591c', '\u75db', '\u7ecf', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u505a', '\u6076', '\u68a6', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u968f', '\u65f6', '\u53ef', '\u4ee5', '\u8054', '\u7cfb', '\u5230', '\u6211', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4f1a', '\u7ecf', '\u5e38', '\u5e26', '\u5ab3', '\u5987', '\u51fa', '\u53bb', '\u73a9', '\uff0c', '\u5979', '\u4e0d', '\u4e00', '\u5b9a', '\u8981', '\u548c', '\u6211', '\u6240', '\u6709', '\u7684', '\u54e5', '\u4eec', '\u90fd', '\u8ba4', '\u8bc6', '\uff0c', '\u4f46', '\u89c1', '\u9762', '\u80fd', '\u8bf4', '\u7684', '\u4e0a', '\u8bdd', '\u5c31', '\u884c', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4f1a', '\u548c', '\u5ab3', '\u5987', '\u7684', '\u59d0', '\u59b9', '\u54e5', '\u4eec', '\u641e', '\u597d', '\u5173', '\u7cfb', '\uff0c', '\u8ba9', '\u5979', '\u4eec', '\u76f8', '\u4fe1', '\u6211', '\u4e00', '\u5b9a', '\u53ef', '\u4ee5', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u5e78', '\u798f', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5435', '\u67b6', '\u540e', '\u3001', '\u4e5f', '\u8981', '\u4e3b', '\u52a8', '\u6253', '\u7535', '\u8bdd', '\u5173', '\u5fc3', '\u5979', '\uff0c', '\u54b1', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u7ed9', '\u5ab3', '\u5987', '\u670d', '\u4e2a', '\u8f6f', '\uff0c', '\u9053', '\u4e2a', '\u6b49', '\u600e', '\u4e48', '\u4e86', '\uff1f'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u7edd', '\u5bf9', '\u4e0d', '\u4f1a', '\u5acc', '\u5f03', '\u81ea', '\u5df1', '\u5ab3', '\u5987', '\uff0c', '\u62ff', '\u5979', '\u548c', '\u522b', '\u4eba', '\u6bd4', '\uff0c', '\u8bf4', '\u5979', '\u8fd9', '\u4e0d', '\u5982', '\u4eba', '\u5bb6', '\uff0c', '\u7eb3', '\u4e0d', '\u5982', '\u4eba', '\u5bb6', '\u7684', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u966a', '\u5ab3', '\u5987', '\u901b', '\u8857', '\u65f6', '\uff0c', '\u78b0', '\u89c1', '\u719f', '\u4eba', '\uff0c', '\u65e0', '\u8bba', '\u6211', '\u5ab3', '\u5987', '\u957f', '\u7684', '\u597d', '\u770b', '\u4e0e', '\u5426', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u5927', '\u65b9', '\u7684', '\u4ecb', '\u7ecd', '\u3002'], ['\u8c01', '\u8ba9', '\u54b1', '\u7237', '\u4eec', '\u5c31', '\u597d', '\u8fd9', '\u53e3', '\u5462', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u60f3', '\u6211', '\u4f1a', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u6700', '\u597d', '\u7684', '\u5e78', '\u798f', '\u3002'], ['\u3010', '\u6211', '\u4eec', '\u91cd', '\u5728', '\u5206', '\u4eab', '\u3002'], ['\u6240', '\u6709', '\u6587', '\u5b57', '\u548c', '\u7f8e', '\u56fe', '\uff0c', '\u6765', '\u81ea', '\u7f51', '\u7edc', '\uff0c', '\u6668', '\u6b23', '\u6559', '\u80b2', '\u6574', '\u7406', '\u3002'], ['\u5bf9', '\u539f', '\u6587', '\u4f5c', 
'\u8005', '\uff0c', '\u8868', '\u793a', '\u656c', '\u610f', '\u3002'], ['\u3011', '\u5173', '\u6ce8', '\u6668', '\u66e6', '\u6559', '\u80b2', '[UNK]', '[UNK]', '\u6668', '\u66e6', '\u6559', '\u80b2', '\uff08', '\u5fae', '\u4fe1', '\u53f7', '\uff1a', 'he', '##bc', '##x', '##jy', '\uff09', '\u3002'], ['\u6253', '\u5f00', '\u5fae', '\u4fe1', '\uff0c', '\u626b', '\u63cf', '\u4e8c', '\u7ef4', '\u7801', '\uff0c', '\u5173', '\u6ce8', '[UNK]', '\u6668', '\u66e6', '\u6559', '\u80b2', '[UNK]', '\uff0c', '\u83b7', '\u53d6', '\u66f4', '\u591a', '\u80b2', '\u513f', '\u8d44', '\u6e90', '\u3002'], ['\u70b9', '\u51fb', '\u4e0b', '\u9762', '\u8ba2', '\u9605', '\u6309', '\u94ae', '\u8ba2', '\u9605', '\uff0c', '\u4f1a', '\u6709', '\u66f4', '\u591a', '\u60ca', '\u559c', '\u54e6', '\uff01']]\n while i < len(document): # \u4ece\u6587\u6863\u7684\u7b2c\u4e00\u4e2a\u4f4d\u7f6e\u5f00\u59cb\uff0c\u6309\u4e2a\u5f80\u4e0b\u770b\n segment = document[i] # segment\u662f\u5217\u8868\uff0c\u4ee3\u8868\u7684\u662f\u6309\u5b57\u5206\u5f00\u7684\u4e00\u4e2a\u5b8c\u6574\u53e5\u5b50\uff0c\u5982 segment=['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u60f3', '\u6211', '\u4f1a', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u6700', '\u597d', '\u7684', '\u5e78', '\u798f', '\u3002']\n # print(\"###i:\",i,\";segment:\",segment)\n current_chunk.append(segment) # \u5c06\u4e00\u4e2a\u72ec\u7acb\u7684\u53e5\u5b50\u52a0\u5165\u5230\u5f53\u524d\u7684\u6587\u672c\u5757\u4e2d\n current_length += len(segment) # \u7d2f\u8ba1\u5230\u4e3a\u6b62\u4f4d\u7f6e\u63a5\u89e6\u5230\u53e5\u5b50\u7684\u603b\u957f\u5ea6\n if i == len(document) - 1 or current_length >= target_seq_length: # \u5982\u679c\u7d2f\u8ba1\u7684\u5e8f\u5217\u957f\u5ea6\u8fbe\u5230\u4e86\u76ee\u6807\u7684\u957f\u5ea6==>\u6784\u9020\u5e76\u6dfb\u52a0\u5230\u201cA[SEP]B\u201c\u4e2d\u7684A\u548cB\u4e2d\u3002\n if current_chunk: # \u5982\u679c\u5f53\u524d\u5757\u4e0d\u4e3a\u7a7a\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2: # \u5f53\u524d\u5757\uff0c\u5982\u679c\u5305\u542b\u8d85\u8fc7\u4e24\u4e2a\u53e5\u5b50\uff0c\u600e\u53d6\u5f53\u524d\u5757\u7684\u4e00\u90e8\u5206\u4f5c\u4e3a\u201cA[SEP]B\u201c\u4e2d\u7684A\u90e8\u5206\n a_end = rng.randint(1, len(current_chunk) - 1)\n # \u5c06\u5f53\u524d\u6587\u672c\u6bb5\u4e2d\u9009\u53d6\u51fa\u6765\u7684\u524d\u534a\u90e8\u5206\uff0c\u8d4b\u503c\u7ed9A\u5373tokens_a\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n # \u6784\u9020\u201cA[SEP]B\u201c\u4e2d\u7684B\u90e8\u5206(\u539f\u672c\u7684B\u6709\u4e00\u90e8\u5206\u662f\u968f\u673a\u7684\u4ece\u53e6\u4e00\u4e2a\u6587\u6863\u4e2d\u9009\u53d6\u7684\uff0c\u6709\u4e00\u90e8\u5206\u662f\u6b63\u5e38\u7684\u5f53\u524d\u6587\u6863\u4e2d\u7684\u540e\u534a\u90e8\uff09\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5: # \u670950%\u7684\u6982\u7387\uff0c\u662f\u4ece\u5176\u4ed6\u6587\u6863\u4e2d\u968f\u673a\u7684\u9009\u53d6\u4e00\u4e2a\u6587\u6863\uff0c\u5e76\u5f97\u5230\u8fd9\u4e2a\u6587\u6863\u7684\u540e\u534a\u7248\u672c\u4f5c\u4e3aB\u5373tokens_b\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. 
However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n random_document_index=0\n for _ in range(10): # \u968f\u673a\u7684\u9009\u51fa\u4e00\u4e2a\u4e0e\u5f53\u524d\u7684\u6587\u6863\u4e0d\u4e00\u6837\u7684\u6587\u6863\u7684\u7d22\u5f15\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index] # \u9009\u51fa\u8fd9\u4e2a\u6587\u6863\n random_start = rng.randint(0, len(random_document) - 1) # \u4ece\u8fd9\u4e2a\u6587\u6863\u9009\u51fa\u4e00\u4e2a\u6bb5\u843d\u7684\u5f00\u59cb\u4f4d\u7f6e\n for j in range(random_start, len(random_document)): # \u4ece\u8fd9\u4e2a\u6587\u6863\u7684\u5f00\u59cb\u4f4d\u7f6e\u5230\u7ed3\u675f\uff0c\u4f5c\u4e3a\u6211\u4eec\u7684\u201cA[SEP]B\u201c\u4e2d\u7684B\u5373tokens_b\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste. \u8fd9\u91cc\u662f\u4e3a\u4e86\u9632\u6b62\u6587\u672c\u7684\u6d6a\u8d39\u7684\u4e00\u4e2a\u5c0f\u6280\u5de7\n num_unused_segments = len(current_chunk) - a_end # e.g. 550-200=350\n i -= num_unused_segments # i=i-num_unused_segments, e.g. i=400, num_unused_segments=350, \u90a3\u4e48 i=i-num_unused_segments=400-350=50\n # Actual next\n else: # \u6709\u53e6\u591650%\u7684\u51e0\u4e4e\uff0c\u4ece\u5f53\u524d\u6587\u672c\u5757\uff08\u957f\u5ea6\u4e3amax_sequence_length\uff09\u4e2d\u7684\u540e\u6bb5\u4e2d\u586b\u5145\u5230tokens_b\u5373\u201cA[SEP]B\u201c\u4e2d\u7684B\u3002\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n # \u628atokens_a & tokens_b\u52a0\u5165\u5230\u6309\u7167bert\u7684\u98ce\u683c\uff0c\u5373\u4ee5[CLS]tokens_a[SEP]tokens_b[SEP]\u7684\u5f62\u5f0f\uff0c\u7ed3\u5408\u5230\u4e00\u8d77\uff0c\u4f5c\u4e3a\u6700\u7ec8\u7684tokens; \u4e5f\u5e26\u4e0asegment_ids\uff0c\u524d\u9762\u90e8\u5206segment_ids\u7684\u503c\u662f0\uff0c\u540e\u9762\u90e8\u5206\u7684\u503c\u662f1.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n # \u521b\u5efamasked LM\u7684\u4efb\u52a1\u7684\u6570\u636e Creates the predictions for the masked LM objective\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance( # \u521b\u5efa\u8bad\u7ec3\u5b9e\u4f8b\u7684\u5bf9\u8c61\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = [] # \u6e05\u7a7a\u5f53\u524d\u5757\n current_length = 0 # \u91cd\u7f6e\u5f53\u524d\u6587\u672c\u5757\u7684\u957f\u5ea6\n i += 1 # \u63a5\u7740\u6587\u6863\u4e2d\u7684\u5185\u5bb9\u5f80\u540e\u770b\n\n return instances","function_tokens":["def","create_instances_from_document_original","(","# THIS IS ORIGINAL BERT STYLE FOR CREATE DATA OF MLM AND NEXT SENTENCE PREDICTION 
TASK","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# \u5f97\u5230\u4e00\u4e2a\u6587\u6863","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","# \u6709\u4e00\u5b9a\u7684\u6bd4\u4f8b\uff0c\u598210%\u7684\u6982\u7387\uff0c\u6211\u4eec\u4f7f\u7528\u6bd4\u8f83\u77ed\u7684\u5e8f\u5217\u957f\u5ea6\uff0c\u4ee5\u7f13\u89e3\u9884\u8bad\u7ec3\u7684\u957f\u5e8f\u5217\u548c\u8c03\u4f18\u9636\u6bb5\uff08\u53ef\u80fd\u7684\uff09\u77ed\u5e8f\u5217\u7684\u4e0d\u4e00\u81f4\u60c5\u51b5","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","# \u8bbe\u6cd5\u4f7f\u7528\u5b9e\u9645\u7684\u53e5\u5b50\uff0c\u800c\u4e0d\u662f\u4efb\u610f\u7684\u622a\u65ad\u53e5\u5b50\uff0c\u4ece\u800c\u66f4\u597d\u7684\u6784\u9020\u53e5\u5b50\u8fde\u8d2f\u6027\u9884\u6d4b\u7684\u4efb\u52a1","instances","=","[","]","current_chunk","=","[","]","# \u5f53\u524d\u5904\u7406\u7684\u6587\u672c\u6bb5\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50","current_length","=","0","i","=","0","# print(\"###document:\",document) # \u4e00\u4e2adocument\u53ef\u4ee5\u662f\u4e00\u6574\u7bc7\u6587\u7ae0\u3001\u65b0\u95fb\u3001\u4e00\u4e2a\u8bcd\u6761\u7b49. 
document:[['\u662f', '\u7237', '\u4eec', '\uff0c', '\u5c31', '\u5f97', '\u7ed9', '\u5ab3', '\u5987', '\u5e78', '\u798f'], ['\u5173', '\u6ce8', '\u3010', '\u6668', '\u66e6', '\u6559', '\u80b2', '\u3011', '\uff0c', '\u83b7', '\u53d6', '\u80b2', '\u513f', '\u7684', '\u667a', '\u6167', '\uff0c', '\u4e0e', '\u5b69', '\u5b50', '\u4e00', '\u540c', '\u6210', '\u957f', '\uff01'], ['\u65b9', '\u6cd5', ':', '\u6253', '\u5f00', '\u5fae', '\u4fe1', '\u2192', '\u6dfb', '\u52a0', '\u670b', '\u53cb', '\u2192', '\u641c', '\u53f7', '\u2192', '##he', '##bc', '##x', '##jy', '##\u2192', '\u5173', '\u6ce8', '!', '\u6211', '\u662f', '\u4e00', '\u4e2a', '\u7237', '\u4eec', '\uff0c', '\u5b5d', '\u987a', '\u662f', '\u505a', '\u4eba', '\u7684', '\u7b2c', '\u4e00', '\u51c6', '\u5219', '\u3002'], ['\u752d', '\u7ba1', '\u5c0f', '\u65f6', '\u5019', '\u600e', '\u4e48', '\u8ddf', '\u5bb6', '\u957f', '\u72af', '\u6df7', '\u86cb', '\uff0c', '\u957f', '\u5927', '\u4e86', '\uff0c', '\u5c31', '\u5e95', '\u62a5', '\u7b54', '\u7236', '\u6bcd', '\uff0c', '\u4ee5', '\u540e', '\u6211', '\u5ab3', '\u5987', '\u4e5f', '\u5fc5', '\u987b', '\u5b5d', '\u987a', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u4e2a', '\u7237', '\u4eec', '\uff0c', '\u53ef', '\u4ee5', '\u82b1', '\u5fc3', '\uff0c', '\u53ef', '\u4ee5', '\u597d', '\u73a9', '\u3002'], ['\u4f46', '\u6211', '\u4e00', '\u5b9a', '\u4f1a', '\u627e', '\u4e00', '\u4e2a', '\u7ba1', '\u7684', '\u4f4f', '\u6211', '\u7684', '\u5973', '\u4eba', '\uff0c', '\u548c', '\u6211', '\u4e00', '\u8d77', '\u751f', '\u6d3b', '\u3002'], ['28', '\u5c81', '\u4ee5', '\u524d', '\u5728', '\u600e', '\u4e48', '\u73a9', '\u90fd', '\u884c', '\uff0c', '\u4f46', '\u6211', '\u6700', '\u540e', '\u4e00', '\u5b9a', '\u4f1a', '\u627e', '\u4e00', '\u4e2a', '\u52e4', '\u4fed', '\u6301', '\u5bb6', '\u7684', '\u5973', '\u4eba', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4e0d', '\u4f1a', '\u8ba9', '\u81ea', '\u5df1', '\u7684', '\u5973', '\u4eba', '\u53d7', '\u4e00', '\u70b9', '\u59d4', '\u5c48', '\uff0c', '\u6bcf', '\u6b21', '\u628a', '\u5979', '\u62b1', '\u5728', '\u6000', '\u91cc', '\uff0c', '\u770b', '\u5979', '\u6d0b', '\u6ea2', '\u7740', '\u5e78', '\u798f', '\u7684', '\u8138', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u5f15', '\u4ee5', '\u4e3a', '\u50b2', '\uff0c', '\u8fd9', '\u7279', '\u4e48', '\u5c31', '\u662f', '\u6211', '\u7684', '\u5973', '\u4eba', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5e72', '\u4ec0', '\u4e48', '\u4e5f', '\u4e0d', '\u80fd', '\u5fd8', '\u4e86', '\u81ea', '\u5df1', '\u5ab3', '\u5987', '\uff0c', '\u5c31', '\u7b97', '\u548c', '\u54e5', '\u4eec', '\u4e00', '\u8d77', '\u559d', '\u9152', '\uff0c', '\u559d', '\u5230', '\u5f88', '\u665a', '\uff0c', '\u4e5f', '\u8981', '\u63d0', '\u524d', '\u6253', '\u7535', '\u8bdd', '\u544a', '\u8bc9', '\u5979', '\uff0c', '\u8ba9', '\u5979', '\u65e9', '\u70b9', '\u4f11', '\u606f', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u5ab3', '\u5987', '\u7edd', '\u5bf9', '\u4e0d', '\u80fd', '\u62bd', '\u70df', '\uff0c', '\u559d', '\u9152', '\u8fd8', '\u52c9', '\u5f3a', '\u8fc7', '\u5f97', '\u53bb', '\uff0c', '\u4e0d', '\u8fc7', '\u8be5', '\u559d', '\u7684', '\u65f6', '\u5019', '\u559d', '\uff0c', '\u4e0d', '\u8be5', '\u559d', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u5c11', '\u626f', '\u7eb3', '\u6781', '\u8584', '\u86cb', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u5ab3', '\u5987', '\u5fc5', '\u987b', '\u542c', '\u6211', 
'\u8bdd', '\uff0c', '\u5728', '\u4eba', '\u524d', '\u4e00', '\u5b9a', '\u8981', '\u7ed9', '\u6211', '\u9762', '\u5b50', '\uff0c', '\u56de', '\u5bb6', '\u4e86', '\u54b1', '\u4ec0', '\u4e48', '\u90fd', '\u597d', '\u8bf4', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5c31', '\u7b97', '\u96be', '\u7684', '\u5403', '\u4e0d', '\u4e0a', '\u996d', '\u4e86', '\uff0c', '\u90fd', '\u4e0d', '\u5f20', '\u53e3', '\u8ddf', '\u5ab3', '\u5987', '\u8981', '\u4e00', '\u5206', '\u94b1', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0d', '\u7ba1', '\u4e0a', '\u5b66', '\u8fd8', '\u662f', '\u4e0a', '\u73ed', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u9001', '\u5ab3', '\u5987', '\u56de', '\u5bb6', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4ea4', '\u5f80', '\u4e0d', '\u5230', '1', '\u5e74', '\uff0c', '\u7edd', '\u5bf9', '\u4e0d', '\u4f1a', '\u548c', '\u5ab3', '\u5987', '\u63d0', '\u8fc7', '\u5206', '\u7684', '\u8981', '\u6c42', '\uff0c', '\u6211', '\u4f1a', '\u5c0a', '\u91cd', '\u5979', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6e38', '\u620f', '\u6c38', '\u8fdc', '\u6bd4', '\u4e0d', '\u4e0a', '\u6211', '\u5ab3', '\u5987', '\u91cd', '\u8981', '\uff0c', '\u53ea', '\u8981', '\u5ab3', '\u5987', '\u53d1', '\u8bdd', '\uff0c', '\u6211', '\u7edd', '\u5bf9', '\u552f', '\u547d', '\u662f', '\u4ece', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0a', 'q', '\u7edd', '\u5bf9', '\u662f', '\u4e3a', '\u4e86', '\u7b49', '\u5ab3', '\u5987', '\uff0c', '\u6240', '\u6709', '\u66a7', '\u6627', '\u7684', '\u5fc3', '\u60c5', '\u53ea', '\u4e3a', '\u5979', '\u4e00', '\u4e2a', '\u5973', '\u4eba', '\u800c', '\u5199', '\uff0c', '\u6211', '\u4e0d', '\u4e00', '\u5b9a', '\u4f1a', '\u7ecf', '\u5e38', '\u5199', '\u65e5', '\u5fd7', '\uff0c', '\u53ef', '\u662f', '\u6211', '\u4f1a', '\u544a', '\u8bc9', '\u5168', '\u4e16', '\u754c', '\uff0c', '\u6211', '\u5f88', '\u7231', '\u5979', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u4e0d', '\u4e00', '\u5b9a', '\u8981', '\u7ecf', '\u5e38', '\u5236', '\u9020', '\u6d6a', '\u6f2b', '\u3001', '\u5076', '\u5c14', '\u8fc7', '\u4e2a', '\u8282', '\u65e5', '\u4e5f', '\u8981', '\u9001', '\u675f', '\u73ab', '\u7470', '\u82b1', '\u7ed9', '\u5ab3', '\u5987', '\u62b1', '\u56de', '\u5bb6', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u624b', '\u673a', '\u4f1a', '24', '\u5c0f', '\u65f6', '\u4e3a', '\u5979', '\u5f00', '\u673a', '\uff0c', '\u8ba9', '\u5979', '\u534a', '\u591c', '\u75db', '\u7ecf', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u505a', '\u6076', '\u68a6', '\u7684', '\u65f6', '\u5019', '\uff0c', '\u968f', '\u65f6', '\u53ef', '\u4ee5', '\u8054', '\u7cfb', '\u5230', '\u6211', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4f1a', '\u7ecf', '\u5e38', '\u5e26', '\u5ab3', '\u5987', '\u51fa', '\u53bb', '\u73a9', '\uff0c', '\u5979', '\u4e0d', '\u4e00', '\u5b9a', '\u8981', '\u548c', '\u6211', '\u6240', '\u6709', '\u7684', '\u54e5', '\u4eec', '\u90fd', '\u8ba4', '\u8bc6', '\uff0c', '\u4f46', '\u89c1', '\u9762', '\u80fd', '\u8bf4', '\u7684', '\u4e0a', '\u8bdd', '\u5c31', '\u884c', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u4f1a', '\u548c', '\u5ab3', '\u5987', '\u7684', '\u59d0', '\u59b9', '\u54e5', '\u4eec', '\u641e', '\u597d', '\u5173', '\u7cfb', '\uff0c', '\u8ba9', '\u5979', '\u4eec', '\u76f8', '\u4fe1', '\u6211', '\u4e00', '\u5b9a', 
'\u53ef', '\u4ee5', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u5e78', '\u798f', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u5435', '\u67b6', '\u540e', '\u3001', '\u4e5f', '\u8981', '\u4e3b', '\u52a8', '\u6253', '\u7535', '\u8bdd', '\u5173', '\u5fc3', '\u5979', '\uff0c', '\u54b1', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u7ed9', '\u5ab3', '\u5987', '\u670d', '\u4e2a', '\u8f6f', '\uff0c', '\u9053', '\u4e2a', '\u6b49', '\u600e', '\u4e48', '\u4e86', '\uff1f'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u7edd', '\u5bf9', '\u4e0d', '\u4f1a', '\u5acc', '\u5f03', '\u81ea', '\u5df1', '\u5ab3', '\u5987', '\uff0c', '\u62ff', '\u5979', '\u548c', '\u522b', '\u4eba', '\u6bd4', '\uff0c', '\u8bf4', '\u5979', '\u8fd9', '\u4e0d', '\u5982', '\u4eba', '\u5bb6', '\uff0c', '\u7eb3', '\u4e0d', '\u5982', '\u4eba', '\u5bb6', '\u7684', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u966a', '\u5ab3', '\u5987', '\u901b', '\u8857', '\u65f6', '\uff0c', '\u78b0', '\u89c1', '\u719f', '\u4eba', '\uff0c', '\u65e0', '\u8bba', '\u6211', '\u5ab3', '\u5987', '\u957f', '\u7684', '\u597d', '\u770b', '\u4e0e', '\u5426', '\uff0c', '\u6211', '\u90fd', '\u4f1a', '\u5927', '\u65b9', '\u7684', '\u4ecb', '\u7ecd', '\u3002'], ['\u8c01', '\u8ba9', '\u54b1', '\u7237', '\u4eec', '\u5c31', '\u597d', '\u8fd9', '\u53e3', '\u5462', '\u3002'], ['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u60f3', '\u6211', '\u4f1a', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u6700', '\u597d', '\u7684', '\u5e78', '\u798f', '\u3002'], ['\u3010', '\u6211', '\u4eec', '\u91cd', '\u5728', '\u5206', '\u4eab', '\u3002'], ['\u6240', '\u6709', '\u6587', '\u5b57', '\u548c', '\u7f8e', '\u56fe', '\uff0c', '\u6765', '\u81ea', '\u7f51', '\u7edc', '\uff0c', '\u6668', '\u6b23', '\u6559', '\u80b2', '\u6574', '\u7406', '\u3002'], ['\u5bf9', '\u539f', '\u6587', '\u4f5c', '\u8005', '\uff0c', '\u8868', '\u793a', '\u656c', '\u610f', '\u3002'], ['\u3011', '\u5173', '\u6ce8', '\u6668', '\u66e6', '\u6559', '\u80b2', '[UNK]', '[UNK]', '\u6668', '\u66e6', '\u6559', '\u80b2', '\uff08', '\u5fae', '\u4fe1', '\u53f7', '\uff1a', 'he', '##bc', '##x', '##jy', '\uff09', '\u3002'], ['\u6253', '\u5f00', '\u5fae', '\u4fe1', '\uff0c', '\u626b', '\u63cf', '\u4e8c', '\u7ef4', '\u7801', '\uff0c', '\u5173', '\u6ce8', '[UNK]', '\u6668', '\u66e6', '\u6559', '\u80b2', '[UNK]', '\uff0c', '\u83b7', '\u53d6', '\u66f4', '\u591a', '\u80b2', '\u513f', '\u8d44', '\u6e90', '\u3002'], ['\u70b9', '\u51fb', '\u4e0b', '\u9762', '\u8ba2', '\u9605', '\u6309', '\u94ae', '\u8ba2', '\u9605', '\uff0c', '\u4f1a', '\u6709', '\u66f4', '\u591a', '\u60ca', '\u559c', '\u54e6', '\uff01']]","while","i","<","len","(","document",")",":","# \u4ece\u6587\u6863\u7684\u7b2c\u4e00\u4e2a\u4f4d\u7f6e\u5f00\u59cb\uff0c\u6309\u4e2a\u5f80\u4e0b\u770b","segment","=","document","[","i","]","# segment\u662f\u5217\u8868\uff0c\u4ee3\u8868\u7684\u662f\u6309\u5b57\u5206\u5f00\u7684\u4e00\u4e2a\u5b8c\u6574\u53e5\u5b50\uff0c\u5982 segment=['\u6211', '\u662f', '\u4e00', '\u7237', '\u4eec', '\uff0c', '\u6211', '\u60f3', '\u6211', '\u4f1a', '\u7ed9', '\u6211', '\u5ab3', '\u5987', '\u6700', '\u597d', '\u7684', '\u5e78', '\u798f', '\u3002']","# print(\"###i:\",i,\";segment:\",segment)","current_chunk",".","append","(","segment",")","# \u5c06\u4e00\u4e2a\u72ec\u7acb\u7684\u53e5\u5b50\u52a0\u5165\u5230\u5f53\u524d\u7684\u6587\u672c\u5757\u4e2d","current_length","+=","len","(","segment",")","# 
\u7d2f\u8ba1\u5230\u4e3a\u6b62\u4f4d\u7f6e\u63a5\u89e6\u5230\u53e5\u5b50\u7684\u603b\u957f\u5ea6","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","# \u5982\u679c\u7d2f\u8ba1\u7684\u5e8f\u5217\u957f\u5ea6\u8fbe\u5230\u4e86\u76ee\u6807\u7684\u957f\u5ea6==>\u6784\u9020\u5e76\u6dfb\u52a0\u5230\u201cA[SEP]B\u201c\u4e2d\u7684A\u548cB\u4e2d\u3002","if","current_chunk",":","# \u5982\u679c\u5f53\u524d\u5757\u4e0d\u4e3a\u7a7a","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","# \u5f53\u524d\u5757\uff0c\u5982\u679c\u5305\u542b\u8d85\u8fc7\u4e24\u4e2a\u53e5\u5b50\uff0c\u600e\u53d6\u5f53\u524d\u5757\u7684\u4e00\u90e8\u5206\u4f5c\u4e3a\u201cA[SEP]B\u201c\u4e2d\u7684A\u90e8\u5206","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","# \u5c06\u5f53\u524d\u6587\u672c\u6bb5\u4e2d\u9009\u53d6\u51fa\u6765\u7684\u524d\u534a\u90e8\u5206\uff0c\u8d4b\u503c\u7ed9A\u5373tokens_a","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","# \u6784\u9020\u201cA[SEP]B\u201c\u4e2d\u7684B\u90e8\u5206(\u539f\u672c\u7684B\u6709\u4e00\u90e8\u5206\u662f\u968f\u673a\u7684\u4ece\u53e6\u4e00\u4e2a\u6587\u6863\u4e2d\u9009\u53d6\u7684\uff0c\u6709\u4e00\u90e8\u5206\u662f\u6b63\u5e38\u7684\u5f53\u524d\u6587\u6863\u4e2d\u7684\u540e\u534a\u90e8\uff09","tokens_b","=","[","]","# Random next","is_random_next","=","False","if","len","(","current_chunk",")","==","1","or","rng",".","random","(",")","<","0.5",":","# \u670950%\u7684\u6982\u7387\uff0c\u662f\u4ece\u5176\u4ed6\u6587\u6863\u4e2d\u968f\u673a\u7684\u9009\u53d6\u4e00\u4e2a\u6587\u6863\uff0c\u5e76\u5f97\u5230\u8fd9\u4e2a\u6587\u6863\u7684\u540e\u534a\u7248\u672c\u4f5c\u4e3aB\u5373tokens_b","is_random_next","=","True","target_b_length","=","target_seq_length","-","len","(","tokens_a",")","# This should rarely go for more than one iteration for large","# corpora. However, just to be careful, we try to make sure that","# the random document is not the same as the document","# we're processing.","random_document_index","=","0","for","_","in","range","(","10",")",":","# \u968f\u673a\u7684\u9009\u51fa\u4e00\u4e2a\u4e0e\u5f53\u524d\u7684\u6587\u6863\u4e0d\u4e00\u6837\u7684\u6587\u6863\u7684\u7d22\u5f15","random_document_index","=","rng",".","randint","(","0",",","len","(","all_documents",")","-","1",")","if","random_document_index","!=","document_index",":","break","random_document","=","all_documents","[","random_document_index","]","# \u9009\u51fa\u8fd9\u4e2a\u6587\u6863","random_start","=","rng",".","randint","(","0",",","len","(","random_document",")","-","1",")","# \u4ece\u8fd9\u4e2a\u6587\u6863\u9009\u51fa\u4e00\u4e2a\u6bb5\u843d\u7684\u5f00\u59cb\u4f4d\u7f6e","for","j","in","range","(","random_start",",","len","(","random_document",")",")",":","# \u4ece\u8fd9\u4e2a\u6587\u6863\u7684\u5f00\u59cb\u4f4d\u7f6e\u5230\u7ed3\u675f\uff0c\u4f5c\u4e3a\u6211\u4eec\u7684\u201cA[SEP]B\u201c\u4e2d\u7684B\u5373tokens_b","tokens_b",".","extend","(","random_document","[","j","]",")","if","len","(","tokens_b",")",">=","target_b_length",":","break","# We didn't actually use these segments so we \"put them back\" so","# they don't go to waste. \u8fd9\u91cc\u662f\u4e3a\u4e86\u9632\u6b62\u6587\u672c\u7684\u6d6a\u8d39\u7684\u4e00\u4e2a\u5c0f\u6280\u5de7","num_unused_segments","=","len","(","current_chunk",")","-","a_end","# e.g. 
550-200=350","i","-=","num_unused_segments","# i=i-num_unused_segments, e.g. i=400, num_unused_segments=350, \u90a3\u4e48 i=i-num_unused_segments=400-350=50","# Actual next","else",":","# \u6709\u53e6\u591650%\u7684\u51e0\u4e4e\uff0c\u4ece\u5f53\u524d\u6587\u672c\u5757\uff08\u957f\u5ea6\u4e3amax_sequence_length\uff09\u4e2d\u7684\u540e\u6bb5\u4e2d\u586b\u5145\u5230tokens_b\u5373\u201cA[SEP]B\u201c\u4e2d\u7684B\u3002","is_random_next","=","False","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","# \u628atokens_a & tokens_b\u52a0\u5165\u5230\u6309\u7167bert\u7684\u98ce\u683c\uff0c\u5373\u4ee5[CLS]tokens_a[SEP]tokens_b[SEP]\u7684\u5f62\u5f0f\uff0c\u7ed3\u5408\u5230\u4e00\u8d77\uff0c\u4f5c\u4e3a\u6700\u7ec8\u7684tokens; \u4e5f\u5e26\u4e0asegment_ids\uff0c\u524d\u9762\u90e8\u5206segment_ids\u7684\u503c\u662f0\uff0c\u540e\u9762\u90e8\u5206\u7684\u503c\u662f1.","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","# \u521b\u5efamasked LM\u7684\u4efb\u52a1\u7684\u6570\u636e Creates the predictions for the masked LM objective","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","# \u521b\u5efa\u8bad\u7ec3\u5b9e\u4f8b\u7684\u5bf9\u8c61","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","# \u6e05\u7a7a\u5f53\u524d\u5757","current_length","=","0","# \u91cd\u7f6e\u5f53\u524d\u6587\u672c\u5757\u7684\u957f\u5ea6","i","+=","1","# \u63a5\u7740\u6587\u6863\u4e2d\u7684\u5185\u5bb9\u5f80\u540e\u770b","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/create_pretraining_data.py#L372-L491"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/create_pretraining_data.py","language":"python","identifier":"create_masked_lm_predictions","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, 
token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original word. When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n if FLAGS.non_chinese==False: # if non chinese is False, that means it is chinese, then try to remove \"##\" which is added previously\n output_tokens = [t[2:] if len(re.findall('##[\\u4E00-\\u9FA5]', t)) > 0 else t for t in tokens] # \u53bb\u6389\"##\"\n else: # english and other language, which is not chinese\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n if FLAGS.non_chinese == False: # if non chinese is False, that means it is chinese, then try to remove \"##\" which is added previously\n masked_token = tokens[index][2:] if len(re.findall('##[\\u4E00-\\u9FA5]', tokens[index])) > 0 else tokens[index] # \u53bb\u6389\"##\"\n else:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n # tf.logging.info('%s' % (tokens))\n # tf.logging.info('%s' % (output_tokens))\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. When a word has been split into","# WordPieces, the first token does not have any marker and any subsequence","# tokens are prefixed with ##. 
So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","if","FLAGS",".","non_chinese","==","False",":","# if non chinese is False, that means it is chinese, then try to remove \"##\" which is added previously","output_tokens","=","[","t","[","2",":","]","if","len","(","re",".","findall","(","'##[\\u4E00-\\u9FA5]'",",","t",")",")",">","0","else","t","for","t","in","tokens","]","# \u53bb\u6389\"##\"","else",":","# english and other language, which is not chinese","output_tokens","=","list","(","tokens",")","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","if","FLAGS",".","non_chinese","==","False",":","# if non chinese is False, that means it is chinese, then try to remove \"##\" which is added previously","masked_token","=","tokens","[","index","]","[","2",":","]","if","len","(","re",".","findall","(","'##[\\u4E00-\\u9FA5]'",",","tokens","[","index","]",")",")",">","0","else","tokens","[","index","]","# \u53bb\u6389\"##\"","else",":","masked_token","=","tokens","[","index","]","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","# tf.logging.info('%s' % (tokens))","# tf.logging.info('%s' % (output_tokens))","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/create_pretraining_data.py#L498-L579"} 
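The record above captures CLUE's Chinese-aware variant of `create_masked_lm_predictions`: group WordPieces into whole-word candidates, shuffle them, then for each selected position replace with `[MASK]` 80% of the time, keep the original 10% of the time, and substitute a random vocabulary token the remaining 10%. The following is a minimal self-contained sketch of that selection rule, not the repository's code: the helper name is hypothetical, it uses a plain `random.Random` in place of the `rng` the script threads through, and it omits the `FLAGS.non_chinese` branch that strips "##" prefixes from Chinese wordpieces.

```python
import random

def whole_word_mlm_sketch(tokens, masked_lm_prob, max_predictions_per_seq,
                          vocab_words, rng):
    """Sketch of BERT-style masked-LM selection with whole-word masking."""
    # Group wordpiece indexes into whole-word candidates: a "##" token
    # continues the previous word, everything else starts a new one.
    cand_indexes = []
    for i, token in enumerate(tokens):
        if token in ("[CLS]", "[SEP]"):
            continue
        if cand_indexes and token.startswith("##"):
            cand_indexes[-1].append(i)
        else:
            cand_indexes.append([i])

    rng.shuffle(cand_indexes)
    output_tokens = list(tokens)
    num_to_predict = min(max_predictions_per_seq,
                         max(1, int(round(len(tokens) * masked_lm_prob))))

    masked = []   # (position, original token) pairs
    covered = set()
    for index_set in cand_indexes:
        if len(masked) >= num_to_predict:
            break
        # Skip a word if masking all of it would exceed the budget
        # or touch an already-covered position.
        if len(masked) + len(index_set) > num_to_predict:
            continue
        if any(i in covered for i in index_set):
            continue
        for i in index_set:
            covered.add(i)
            # A single uniform draw with 0.8/0.9 thresholds is
            # distributionally the same as the original's nested
            # 80% then 50% coin flips.
            p = rng.random()
            if p < 0.8:
                output_tokens[i] = "[MASK]"   # 80%: mask
            elif p < 0.9:
                pass                          # 10%: keep original
            else:                             # 10%: random vocab token
                output_tokens[i] = vocab_words[rng.randint(0, len(vocab_words) - 1)]
            masked.append((i, tokens[i]))

    masked.sort()
    return output_tokens, [p for p, _ in masked], [t for _, t in masked]

# Usage: "play ##ing" is one whole-word candidate, masked all-or-nothing.
rng = random.Random(12345)
out, positions, labels = whole_word_mlm_sketch(
    ["[CLS]", "play", "##ing", "go", "[SEP]"], 0.5, 2, ["a", "b"], rng)
```

Returning the original tokens as `labels` alongside their `positions` mirrors the record's `MaskedLmInstance` bookkeeping: the model predicts the pre-masking token at each listed position.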
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/create_pretraining_data.py","language":"python","identifier":"create_masked_lm_predictions_original","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions_original(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original word. When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions_original","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. 
When a word has been split into","# WordPieces, the first token does not have any marker and any subsequence","# tokens are prefixed with ##. So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","output_tokens","=","list","(","tokens",")","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","masked_token","=","tokens","[","index","]","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/create_pretraining_data.py#L581-L654"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/create_pretraining_data.py","language":"python","identifier":"truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_num_tokens, rng)","argument_list":"","return_statement":"","docstring":"Truncates a pair of sequences to a maximum sequence length.","docstring_summary":"Truncates a pair of sequences to a maximum sequence length.","docstring_tokens":["Truncates","a","pair","of","sequences","to","a","maximum","sequence","length","."],"function":"def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + 
len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()","function_tokens":["def","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")",":","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_num_tokens",":","break","trunc_tokens","=","tokens_a","if","len","(","tokens_a",")",">","len","(","tokens_b",")","else","tokens_b","assert","len","(","trunc_tokens",")",">=","1","# We want to sometimes truncate from the front and sometimes from the","# back to add more randomness and avoid biases.","if","rng",".","random","(",")","<","0.5",":","del","trunc_tokens","[","0","]","else",":","trunc_tokens",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/create_pretraining_data.py#L657-L672"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/bert_utils.py","language":"python","identifier":"get_shape_list","parameters":"(tensor, expected_rank=None, name=None)","argument_list":"","return_statement":"return shape","docstring":"Returns a list of the shape of tensor, preferring static dimensions.\n\n\tArgs:\n\t\ttensor: A tf.Tensor object to find the shape of.\n\t\texpected_rank: (optional) int. The expected rank of `tensor`. If this is\n\t\t\tspecified and the `tensor` has a different rank, and exception will be\n\t\t\tthrown.\n\t\tname: Optional name of the tensor for the error message.\n\n\tReturns:\n\t\tA list of dimensions of the shape of tensor. All static dimensions will\n\t\tbe returned as python integers, and dynamic dimensions will be returned\n\t\tas tf.Tensor scalars.","docstring_summary":"Returns a list of the shape of tensor, preferring static dimensions.","docstring_tokens":["Returns","a","list","of","the","shape","of","tensor","preferring","static","dimensions","."],"function":"def get_shape_list(tensor, expected_rank=None, name=None):\n\t\"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n\tArgs:\n\t\ttensor: A tf.Tensor object to find the shape of.\n\t\texpected_rank: (optional) int. The expected rank of `tensor`. If this is\n\t\t\tspecified and the `tensor` has a different rank, and exception will be\n\t\t\tthrown.\n\t\tname: Optional name of the tensor for the error message.\n\n\tReturns:\n\t\tA list of dimensions of the shape of tensor. 
All static dimensions will\n\t\tbe returned as python integers, and dynamic dimensions will be returned\n\t\tas tf.Tensor scalars.\n\t\"\"\"\n\tif name is None:\n\t\tname = tensor.name\n\n\tif expected_rank is not None:\n\t\tassert_rank(tensor, expected_rank, name)\n\n\tshape = tensor.shape.as_list()\n\n\tnon_static_indexes = []\n\tfor (index, dim) in enumerate(shape):\n\t\tif dim is None:\n\t\t\tnon_static_indexes.append(index)\n\n\tif not non_static_indexes:\n\t\treturn shape\n\n\tdyn_shape = tf.shape(tensor)\n\tfor index in non_static_indexes:\n\t\tshape[index] = dyn_shape[index]\n\treturn shape","function_tokens":["def","get_shape_list","(","tensor",",","expected_rank","=","None",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","if","expected_rank","is","not","None",":","assert_rank","(","tensor",",","expected_rank",",","name",")","shape","=","tensor",".","shape",".","as_list","(",")","non_static_indexes","=","[","]","for","(","index",",","dim",")","in","enumerate","(","shape",")",":","if","dim","is","None",":","non_static_indexes",".","append","(","index",")","if","not","non_static_indexes",":","return","shape","dyn_shape","=","tf",".","shape","(","tensor",")","for","index","in","non_static_indexes",":","shape","[","index","]","=","dyn_shape","[","index","]","return","shape"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/bert_utils.py#L13-L47"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/bert_utils.py","language":"python","identifier":"reshape_to_matrix","parameters":"(input_tensor)","argument_list":"","return_statement":"return output_tensor","docstring":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_summary":"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).","docstring_tokens":["Reshapes","a",">","=","rank","2","tensor","to","a","rank","2","tensor","(","i",".","e",".","a","matrix",")","."],"function":"def reshape_to_matrix(input_tensor):\n\t\"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n\tndims = input_tensor.shape.ndims\n\tif ndims < 2:\n\t\traise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n\t\t\t\t\t\t\t\t\t\t (input_tensor.shape))\n\tif ndims == 2:\n\t\treturn input_tensor\n\n\twidth = input_tensor.shape[-1]\n\toutput_tensor = tf.reshape(input_tensor, [-1, width])\n\treturn output_tensor","function_tokens":["def","reshape_to_matrix","(","input_tensor",")",":","ndims","=","input_tensor",".","shape",".","ndims","if","ndims","<","2",":","raise","ValueError","(","\"Input tensor must have at least rank 2. 
Shape = %s\"","%","(","input_tensor",".","shape",")",")","if","ndims","==","2",":","return","input_tensor","width","=","input_tensor",".","shape","[","-","1","]","output_tensor","=","tf",".","reshape","(","input_tensor",",","[","-","1",",","width","]",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/bert_utils.py#L49-L60"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/bert_utils.py","language":"python","identifier":"reshape_from_matrix","parameters":"(output_tensor, orig_shape_list)","argument_list":"","return_statement":"return tf.reshape(output_tensor, orig_dims + [width])","docstring":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_summary":"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.","docstring_tokens":["Reshapes","a","rank","2","tensor","back","to","its","original","rank",">","=","2","tensor","."],"function":"def reshape_from_matrix(output_tensor, orig_shape_list):\n\t\"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n\tif len(orig_shape_list) == 2:\n\t\treturn output_tensor\n\n\toutput_shape = get_shape_list(output_tensor)\n\n\torig_dims = orig_shape_list[0:-1]\n\twidth = output_shape[-1]\n\n\treturn tf.reshape(output_tensor, orig_dims + [width])","function_tokens":["def","reshape_from_matrix","(","output_tensor",",","orig_shape_list",")",":","if","len","(","orig_shape_list",")","==","2",":","return","output_tensor","output_shape","=","get_shape_list","(","output_tensor",")","orig_dims","=","orig_shape_list","[","0",":","-","1","]","width","=","output_shape","[","-","1","]","return","tf",".","reshape","(","output_tensor",",","orig_dims","+","[","width","]",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/bert_utils.py#L62-L72"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/bert_utils.py","language":"python","identifier":"assert_rank","parameters":"(tensor, expected_rank, name=None)","argument_list":"","return_statement":"","docstring":"Raises an exception if the tensor rank is not of the expected rank.\n\n\tArgs:\n\t\ttensor: A tf.Tensor to check the rank of.\n\t\texpected_rank: Python integer or list of integers, expected rank.\n\t\tname: Optional name of the tensor for the error message.\n\n\tRaises:\n\t\tValueError: If the expected shape doesn't match the actual shape.","docstring_summary":"Raises an exception if the tensor rank is not of the expected rank.","docstring_tokens":["Raises","an","exception","if","the","tensor","rank","is","not","of","the","expected","rank","."],"function":"def assert_rank(tensor, expected_rank, name=None):\n\t\"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n\tArgs:\n\t\ttensor: A tf.Tensor to check the rank of.\n\t\texpected_rank: Python integer or list of integers, expected rank.\n\t\tname: Optional name of the tensor for the error message.\n\n\tRaises:\n\t\tValueError: If the expected shape doesn't match the actual shape.\n\t\"\"\"\n\tif name is None:\n\t\tname = tensor.name\n\n\texpected_rank_dict = {}\n\tif isinstance(expected_rank, six.integer_types):\n\t\texpected_rank_dict[expected_rank] = True\n\telse:\n\t\tfor x in expected_rank:\n\t\t\texpected_rank_dict[x] = True\n\n\tactual_rank = tensor.shape.ndims\n\tif actual_rank not 
in expected_rank_dict:\n\t\tscope_name = tf.get_variable_scope().name\n\t\traise ValueError(\n\t\t\t\t\"For the tensor `%s` in scope `%s`, the actual rank \"\n\t\t\t\t\"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n\t\t\t\t(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))","function_tokens":["def","assert_rank","(","tensor",",","expected_rank",",","name","=","None",")",":","if","name","is","None",":","name","=","tensor",".","name","expected_rank_dict","=","{","}","if","isinstance","(","expected_rank",",","six",".","integer_types",")",":","expected_rank_dict","[","expected_rank","]","=","True","else",":","for","x","in","expected_rank",":","expected_rank_dict","[","x","]","=","True","actual_rank","=","tensor",".","shape",".","ndims","if","actual_rank","not","in","expected_rank_dict",":","scope_name","=","tf",".","get_variable_scope","(",")",".","name","raise","ValueError","(","\"For the tensor `%s` in scope `%s`, the actual rank \"","\"`%d` (shape = %s) is not equal to the expected rank `%s`\"","%","(","name",",","scope_name",",","actual_rank",",","str","(","tensor",".","shape",")",",","str","(","expected_rank",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/bert_utils.py#L74-L101"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/bert_utils.py","language":"python","identifier":"gather_indexes","parameters":"(sequence_tensor, positions)","argument_list":"","return_statement":"return output_tensor","docstring":"Gathers the vectors at the specific positions over a minibatch.","docstring_summary":"Gathers the vectors at the specific positions over a minibatch.","docstring_tokens":["Gathers","the","vectors","at","the","specific","positions","over","a","minibatch","."],"function":"def gather_indexes(sequence_tensor, positions):\n\t\"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n\tsequence_shape = get_shape_list(sequence_tensor, expected_rank=3)\n\tbatch_size = sequence_shape[0]\n\tseq_length = sequence_shape[1]\n\twidth = sequence_shape[2]\n\n\tflat_offsets = tf.reshape(\n\t\t\ttf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n\tflat_positions = tf.reshape(positions + flat_offsets, [-1])\n\tflat_sequence_tensor = tf.reshape(sequence_tensor,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[batch_size * seq_length, width])\n\toutput_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n\treturn output_tensor","function_tokens":["def","gather_indexes","(","sequence_tensor",",","positions",")",":","sequence_shape","=","get_shape_list","(","sequence_tensor",",","expected_rank","=","3",")","batch_size","=","sequence_shape","[","0","]","seq_length","=","sequence_shape","[","1","]","width","=","sequence_shape","[","2","]","flat_offsets","=","tf",".","reshape","(","tf",".","range","(","0",",","batch_size",",","dtype","=","tf",".","int32",")","*","seq_length",",","[","-","1",",","1","]",")","flat_positions","=","tf",".","reshape","(","positions","+","flat_offsets",",","[","-","1","]",")","flat_sequence_tensor","=","tf",".","reshape","(","sequence_tensor",",","[","batch_size","*","seq_length",",","width","]",")","output_tensor","=","tf",".","gather","(","flat_sequence_tensor",",","flat_positions",")","return","output_tensor"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/bert_utils.py#L103-L116"} 
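The `gather_indexes` record just above relies on one index trick: flatten the `[batch_size, seq_length, width]` activations to `[batch_size * seq_length, width]`, offset each example's positions by `batch_index * seq_length`, and collect all masked positions across the minibatch with a single gather. Here is a minimal re-statement of that arithmetic in NumPy — NumPy is used only so the sketch runs without a TF1 session, and the function name is an assumption, not part of `bert_utils.py`:

```python
import numpy as np

def gather_indexes_np(sequence_tensor, positions):
    """Gather vectors at `positions` over a minibatch.

    sequence_tensor: float array of shape [batch_size, seq_length, width]
    positions:       int array of shape [batch_size, num_positions]
    Returns an array of shape [batch_size * num_positions, width].
    """
    batch_size, seq_length, width = sequence_tensor.shape
    # Shift each row's positions into the flattened [B*S, W] layout.
    flat_offsets = (np.arange(batch_size) * seq_length).reshape(-1, 1)
    flat_positions = (positions + flat_offsets).reshape(-1)
    flat_sequence = sequence_tensor.reshape(batch_size * seq_length, width)
    return flat_sequence[flat_positions]

# Example: pick positions 1 and 3 from sequence 0, and 0 and 2 from sequence 1.
x = np.arange(2 * 5 * 4, dtype=np.float32).reshape(2, 5, 4)
picked = gather_indexes_np(x, np.array([[1, 3], [0, 2]]))
assert picked.shape == (4, 4)
assert np.array_equal(picked[0], x[0, 1])
```

The TF version in the record is the same computation with `tf.range`, `tf.reshape`, and `tf.gather`; flattening first keeps it a single gather instead of a per-example loop, which matters on TPU.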
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"write_instance_to_example_files","parameters":"(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files)","argument_list":"","return_statement":"","docstring":"Create TF example files from `TrainingInstance`s.","docstring_summary":"Create TF example files from `TrainingInstance`s.","docstring_tokens":["Create","TF","example","files","from","TrainingInstance","s","."],"function":"def write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(tf.python_io.TFRecordWriter(output_file))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n # print(\"length of segment_ids:\",len(segment_ids),\"max_seq_length:\", max_seq_length)\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n tf.logging.info(\n \"%s: %s\" % (feature_name, \" \".join([str(x) for x in values])))\n\n for writer in writers:\n writer.close()\n\n tf.logging.info(\"Wrote %d total instances\", 
total_written)","function_tokens":["def","write_instance_to_example_files","(","instances",",","tokenizer",",","max_seq_length",",","max_predictions_per_seq",",","output_files",")",":","writers","=","[","]","for","output_file","in","output_files",":","writers",".","append","(","tf",".","python_io",".","TFRecordWriter","(","output_file",")",")","writer_index","=","0","total_written","=","0","for","(","inst_index",",","instance",")","in","enumerate","(","instances",")",":","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","tokens",")","input_mask","=","[","1","]","*","len","(","input_ids",")","segment_ids","=","list","(","instance",".","segment_ids",")","assert","len","(","input_ids",")","<=","max_seq_length","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","# print(\"length of segment_ids:\",len(segment_ids),\"max_seq_length:\", max_seq_length)","assert","len","(","segment_ids",")","==","max_seq_length","masked_lm_positions","=","list","(","instance",".","masked_lm_positions",")","masked_lm_ids","=","tokenizer",".","convert_tokens_to_ids","(","instance",".","masked_lm_labels",")","masked_lm_weights","=","[","1.0","]","*","len","(","masked_lm_ids",")","while","len","(","masked_lm_positions",")","<","max_predictions_per_seq",":","masked_lm_positions",".","append","(","0",")","masked_lm_ids",".","append","(","0",")","masked_lm_weights",".","append","(","0.0",")","next_sentence_label","=","1","if","instance",".","is_random_next","else","0","features","=","collections",".","OrderedDict","(",")","features","[","\"input_ids\"","]","=","create_int_feature","(","input_ids",")","features","[","\"input_mask\"","]","=","create_int_feature","(","input_mask",")","features","[","\"segment_ids\"","]","=","create_int_feature","(","segment_ids",")","features","[","\"masked_lm_positions\"","]","=","create_int_feature","(","masked_lm_positions",")","features","[","\"masked_lm_ids\"","]","=","create_int_feature","(","masked_lm_ids",")","features","[","\"masked_lm_weights\"","]","=","create_float_feature","(","masked_lm_weights",")","features","[","\"next_sentence_labels\"","]","=","create_int_feature","(","[","next_sentence_label","]",")","tf_example","=","tf",".","train",".","Example","(","features","=","tf",".","train",".","Features","(","feature","=","features",")",")","writers","[","writer_index","]",".","write","(","tf_example",".","SerializeToString","(",")",")","writer_index","=","(","writer_index","+","1",")","%","len","(","writers",")","total_written","+=","1","if","inst_index","<","20",":","tf",".","logging",".","info","(","\"*** Example ***\"",")","tf",".","logging",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","instance",".","tokens","]",")",")","for","feature_name","in","features",".","keys","(",")",":","feature","=","features","[","feature_name","]","values","=","[","]","if","feature",".","int64_list",".","value",":","values","=","feature",".","int64_list",".","value","elif","feature",".","float_list",".","value",":","values","=","feature",".","float_list",".","value","tf",".","logging",".","info","(","\"%s: %s\"","%","(","feature_name",",","\" 
\"",".","join","(","[","str","(","x",")","for","x","in","values","]",")",")",")","for","writer","in","writers",":","writer",".","close","(",")","tf",".","logging",".","info","(","\"Wrote %d total instances\"",",","total_written",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L98-L169"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"create_training_instances","parameters":"(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng)","argument_list":"","return_statement":"return instances","docstring":"Create `TrainingInstance`s from raw text.","docstring_summary":"Create `TrainingInstance`s from raw text.","docstring_tokens":["Create","TrainingInstance","s","from","raw","text","."],"function":"def create_training_instances(input_files, tokenizer, max_seq_length,\n dupe_factor, short_seq_prob, masked_lm_prob,\n max_predictions_per_seq, rng):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n print(\"create_training_instances.started...\")\n for input_file in input_files:\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline().replace(\"\",\"\"))# .replace(\"\u201d\",\"\")) # \u5c06\u3001\u201d\u66ff\u6362\u6389\u3002\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng))\n\n rng.shuffle(instances)\n print(\"create_training_instances.ended...\")\n\n return instances","function_tokens":["def","create_training_instances","(","input_files",",","tokenizer",",","max_seq_length",",","dupe_factor",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","rng",")",":","all_documents","=","[","[","]","]","# Input file format:","# (1) One sentence per line. These should ideally be actual sentences, not","# entire paragraphs or arbitrary spans of text. (Because we use the","# sentence boundaries for the \"next sentence prediction\" task).","# (2) Blank lines between documents. 
Document boundaries are needed so","# that the \"next sentence prediction\" task doesn't span between documents.","print","(","\"create_training_instances.started...\"",")","for","input_file","in","input_files",":","with","tf",".","gfile",".","GFile","(","input_file",",","\"r\"",")","as","reader",":","while","True",":","line","=","tokenization",".","convert_to_unicode","(","reader",".","readline","(",")",".","replace","(","\"\"",",","\"\"",")",")","# .replace(\"\u201d\",\"\")) # \u5c06\u3001\u201d\u66ff\u6362\u6389\u3002","if","not","line",":","break","line","=","line",".","strip","(",")","# Empty lines are used as document delimiters","if","not","line",":","all_documents",".","append","(","[","]",")","tokens","=","tokenizer",".","tokenize","(","line",")","if","tokens",":","all_documents","[","-","1","]",".","append","(","tokens",")","# Remove empty documents","all_documents","=","[","x","for","x","in","all_documents","if","x","]","rng",".","shuffle","(","all_documents",")","vocab_words","=","list","(","tokenizer",".","vocab",".","keys","(",")",")","instances","=","[","]","for","_","in","range","(","dupe_factor",")",":","for","document_index","in","range","(","len","(","all_documents",")",")",":","instances",".","extend","(","create_instances_from_document","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",")","rng",".","shuffle","(","instances",")","print","(","\"create_training_instances.ended...\"",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L182-L226"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"_is_chinese_char","parameters":"(cp)","argument_list":"","return_statement":"","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True","function_tokens":["def","_is_chinese_char","(","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. 
The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like the all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L229-L247"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"get_new_segment","parameters":"(segment)","argument_list":"","return_statement":"return new_segment","docstring":"\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd","docstring_summary":"\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd","docstring_tokens":["\u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd",":","\u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0","(","#",")","\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002",":","param","segment",":","\u4e00\u53e5\u8bdd",":","return",":","\u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd"],"function":"def get_new_segment(segment): # \u65b0\u589e\u7684\u65b9\u6cd5 ####\n \"\"\"\n \u8f93\u5165\u4e00\u53e5\u8bdd\uff0c\u8fd4\u56de\u4e00\u53e5\u7ecf\u8fc7\u5904\u7406\u7684\u8bdd: \u4e3a\u4e86\u652f\u6301\u4e2d\u6587\u5168\u79f0mask\uff0c\u5c06\u88ab\u5206\u5f00\u7684\u8bcd\uff0c\u5c06\u4e0a\u7279\u6b8a\u6807\u8bb0(\"#\")\uff0c\u4f7f\u5f97\u540e\u7eed\u5904\u7406\u6a21\u5757\uff0c\u80fd\u591f\u77e5\u9053\u54ea\u4e9b\u5b57\u662f\u5c5e\u4e8e\u540c\u4e00\u4e2a\u8bcd\u7684\u3002\n :param segment: \u4e00\u53e5\u8bdd\n :return: \u4e00\u53e5\u5904\u7406\u8fc7\u7684\u8bdd\n \"\"\"\n seq_cws = jieba.lcut(\"\".join(segment))\n seq_cws_dict = {x: 1 for x in seq_cws}\n new_segment = []\n i = 0\n while i < len(segment):\n if 
len(re.findall('[\\u4E00-\\u9FA5]', segment[i]))==0: # \u4e0d\u662f\u4e2d\u6587\u7684\uff0c\u539f\u6587\u52a0\u8fdb\u53bb\u3002\n new_segment.append(segment[i])\n i += 1\n continue\n\n has_add = False\n for length in range(3,0,-1):\n if i+length>len(segment):\n continue\n if ''.join(segment[i:i+length]) in seq_cws_dict:\n new_segment.append(segment[i])\n for l in range(1, length):\n new_segment.append('##' + segment[i+l])\n i += length\n has_add = True\n break\n if not has_add:\n new_segment.append(segment[i])\n i += 1\n return new_segment","function_tokens":["def","get_new_segment","(","segment",")",":","# \u65b0\u589e\u7684\u65b9\u6cd5 ####","seq_cws","=","jieba",".","lcut","(","\"\"",".","join","(","segment",")",")","seq_cws_dict","=","{","x",":","1","for","x","in","seq_cws","}","new_segment","=","[","]","i","=","0","while","i","<","len","(","segment",")",":","if","len","(","re",".","findall","(","'[\\u4E00-\\u9FA5]'",",","segment","[","i","]",")",")","==","0",":","# \u4e0d\u662f\u4e2d\u6587\u7684\uff0c\u539f\u6587\u52a0\u8fdb\u53bb\u3002","new_segment",".","append","(","segment","[","i","]",")","i","+=","1","continue","has_add","=","False","for","length","in","range","(","3",",","0",",","-","1",")",":","if","i","+","length",">","len","(","segment",")",":","continue","if","''",".","join","(","segment","[","i",":","i","+","length","]",")","in","seq_cws_dict",":","new_segment",".","append","(","segment","[","i","]",")","for","l","in","range","(","1",",","length",")",":","new_segment",".","append","(","'##'","+","segment","[","i","+","l","]",")","i","+=","length","has_add","=","True","break","if","not","has_add",":","new_segment",".","append","(","segment","[","i","]",")","i","+=","1","return","new_segment"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L250-L280"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"get_raw_instance","parameters":"(document,max_sequence_length)","argument_list":"","return_statement":"return result_list","docstring":"\u83b7\u53d6\u521d\u6b65\u7684\u8bad\u7ec3\u5b9e\u4f8b\uff0c\u5c06\u6574\u6bb5\u6309\u7167max_sequence_length\u5207\u5206\u6210\u591a\u4e2a\u90e8\u5206,\u5e76\u4ee5\u591a\u4e2a\u5904\u7406\u597d\u7684\u5b9e\u4f8b\u7684\u5f62\u5f0f\u8fd4\u56de\u3002\n :param document: \u4e00\u6574\u6bb5\n :param max_sequence_length:\n :return: a list. each element is a sequence of text","docstring_summary":"\u83b7\u53d6\u521d\u6b65\u7684\u8bad\u7ec3\u5b9e\u4f8b\uff0c\u5c06\u6574\u6bb5\u6309\u7167max_sequence_length\u5207\u5206\u6210\u591a\u4e2a\u90e8\u5206,\u5e76\u4ee5\u591a\u4e2a\u5904\u7406\u597d\u7684\u5b9e\u4f8b\u7684\u5f62\u5f0f\u8fd4\u56de\u3002\n :param document: \u4e00\u6574\u6bb5\n :param max_sequence_length:\n :return: a list. 
each element is a sequence of text","docstring_tokens":["\u83b7\u53d6\u521d\u6b65\u7684\u8bad\u7ec3\u5b9e\u4f8b\uff0c\u5c06\u6574\u6bb5\u6309\u7167max_sequence_length\u5207\u5206\u6210\u591a\u4e2a\u90e8\u5206","\u5e76\u4ee5\u591a\u4e2a\u5904\u7406\u597d\u7684\u5b9e\u4f8b\u7684\u5f62\u5f0f\u8fd4\u56de\u3002",":","param","document",":","\u4e00\u6574\u6bb5",":","param","max_sequence_length",":",":","return",":","a","list",".","each","element","is","a","sequence","of","text"],"function":"def get_raw_instance(document,max_sequence_length): # \u65b0\u589e\u7684\u65b9\u6cd5 TODO need check again to ensure full use of data\n \"\"\"\n \u83b7\u53d6\u521d\u6b65\u7684\u8bad\u7ec3\u5b9e\u4f8b\uff0c\u5c06\u6574\u6bb5\u6309\u7167max_sequence_length\u5207\u5206\u6210\u591a\u4e2a\u90e8\u5206,\u5e76\u4ee5\u591a\u4e2a\u5904\u7406\u597d\u7684\u5b9e\u4f8b\u7684\u5f62\u5f0f\u8fd4\u56de\u3002\n :param document: \u4e00\u6574\u6bb5\n :param max_sequence_length:\n :return: a list. each element is a sequence of text\n \"\"\"\n max_sequence_length_allowed=max_sequence_length-2\n document = [seq for seq in document if len(seq)<max_sequence_length_allowed]\n sizes = [len(seq) for seq in document]\n\n result_list = []\n curr_seq = [] # \u5f53\u524d\u5904\u7406\u7684\u5e8f\u5217\n sz_idx = 0\n while sz_idx < len(sizes):\n # \u5f53\u524d\u53e5\u5b50\u52a0\u4e0a\u65b0\u7684\u53e5\u5b50\uff0c\u5982\u679c\u957f\u5ea6\u5c0f\u4e8e\u6700\u5927\u9650\u5236\uff0c\u5219\u5408\u5e76\u5f53\u524d\u53e5\u5b50\u548c\u65b0\u53e5\u5b50\uff1b\u5426\u5219\u5373\u8d85\u8fc7\u4e86\u6700\u5927\u9650\u5236\uff0c\u90a3\u4e48\u505a\u4e3a\u4e00\u4e2a\u65b0\u7684\u5e8f\u5217\u52a0\u5230\u76ee\u6807\u5217\u8868\u4e2d\n if len(curr_seq) + sizes[sz_idx] <= max_sequence_length_allowed: # or len(curr_seq)==0:\n curr_seq += document[sz_idx]\n sz_idx += 1\n else:\n result_list.append(curr_seq)\n curr_seq = []\n # \u5bf9\u6700\u540e\u4e00\u4e2a\u5e8f\u5217\u8fdb\u884c\u5904\u7406\uff0c\u5982\u679c\u592a\u77ed\u7684\u8bdd\uff0c\u4e22\u5f03\u6389\u3002\n if len(curr_seq)>max_sequence_length_allowed\/2: # \/2\n result_list.append(curr_seq)\n\n # # \u8ba1\u7b97\u603b\u5171\u53ef\u4ee5\u5f97\u5230\u591a\u5c11\u4efd\n # num_instance=int(len(big_list)\/max_sequence_length_allowed)+1\n # print(\"num_instance:\",num_instance)\n # # \u5207\u5206\u6210\u591a\u4efd\uff0c\u6dfb\u52a0\u5230\u5217\u8868\u4e2d\n # result_list=[]\n # for j in range(num_instance):\n # index=j*max_sequence_length_allowed\n # end_index=index+max_sequence_length_allowed if j!=num_instance-1 else -1\n # result_list.append(big_list[index:end_index])\n return result_list","function_tokens":["def","get_raw_instance","(","document",",","max_sequence_length",")",":","# \u65b0\u589e\u7684\u65b9\u6cd5 TODO need check again to ensure full use of data","max_sequence_length_allowed","=","max_sequence_length","-","2","document","=","[","seq","for","seq","in","document","if","len","(","seq",")","<","max_sequence_length_allowed","]","sizes","=","[","len","(","seq",")","for","seq","in","document","]","result_list","=","[","]","curr_seq","=","[","]","# \u5f53\u524d\u5904\u7406\u7684\u5e8f\u5217","sz_idx","=","0","while","sz_idx","<","len","(","sizes",")",":","# \u5f53\u524d\u53e5\u5b50\u52a0\u4e0a\u65b0\u7684\u53e5\u5b50\uff0c\u5982\u679c\u957f\u5ea6\u5c0f\u4e8e\u6700\u5927\u9650\u5236\uff0c\u5219\u5408\u5e76\u5f53\u524d\u53e5\u5b50\u548c\u65b0\u53e5\u5b50\uff1b\u5426\u5219\u5373\u8d85\u8fc7\u4e86\u6700\u5927\u9650\u5236\uff0c\u90a3\u4e48\u505a\u4e3a\u4e00\u4e2a\u65b0\u7684\u5e8f\u5217\u52a0\u5230\u76ee\u6807\u5217\u8868\u4e2d","if","len","(","curr_seq",")","+","sizes","[","sz_idx","]","<=","max_sequence_length_allowed",":","# or len(curr_seq)==0:","curr_seq","+=","document","[","sz_idx","]","sz_idx","+=","1","else",":","result_list",".","append","(","curr_seq",")","curr_seq","=","[","]","# \u5bf9\u6700\u540e\u4e00\u4e2a\u5e8f\u5217\u8fdb\u884c\u5904\u7406\uff0c\u5982\u679c\u592a\u77ed\u7684\u8bdd\uff0c\u4e22\u5f03\u6389\u3002","if","len","(","curr_seq",")",">","max_sequence_length_allowed","\/","2",":","# \/2","result_list",".","append","(","curr_seq",")","# # \u8ba1\u7b97\u603b\u5171\u53ef\u4ee5\u5f97\u5230\u591a\u5c11\u4efd","# num_instance=int(len(big_list)\/max_sequence_length_allowed)+1","# print(\"num_instance:\",num_instance)","# # \u5207\u5206\u6210\u591a\u4efd\uff0c\u6dfb\u52a0\u5230\u5217\u8868\u4e2d","# result_list=[]","# for j in range(num_instance):","# index=j*max_sequence_length_allowed","# 
end_index=index+max_sequence_length_allowed if j!=num_instance-1 else -1","# result_list.append(big_list[index:end_index])","return","result_list"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L282-L317"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"create_instances_from_document","parameters":"( # \u65b0\u589e\u7684\u65b9\u6cd5\n # \u76ee\u6807\u6309\u7167RoBERTa\u7684\u601d\u8def\uff0c\u4f7f\u7528DOC-SENTENCES\uff0c\u5e76\u4f1a\u53bb\u6389NSP\u4efb\u52a1: \u4ece\u4e00\u4e2a\u6587\u6863\u4e2d\u8fde\u7eed\u7684\u83b7\u5f97\u6587\u672c\uff0c\u76f4\u5230\u8fbe\u5230\u6700\u5927\u957f\u5ea6\u3002\u5982\u679c\u662f\u4ece\u4e0b\u4e00\u4e2a\u6587\u6863\u4e2d\u83b7\u5f97\uff0c\u90a3\u4e48\u52a0\u4e0a\u4e00\u4e2a\u5206\u9694\u7b26\n # document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.\n # \u7ed9\u5b9a\u4e00\u4e2adocument\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u751f\u6210\u4e00\u4e9binstance.\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document( # \u65b0\u589e\u7684\u65b9\u6cd5\n # \u76ee\u6807\u6309\u7167RoBERTa\u7684\u601d\u8def\uff0c\u4f7f\u7528DOC-SENTENCES\uff0c\u5e76\u4f1a\u53bb\u6389NSP\u4efb\u52a1: \u4ece\u4e00\u4e2a\u6587\u6863\u4e2d\u8fde\u7eed\u7684\u83b7\u5f97\u6587\u672c\uff0c\u76f4\u5230\u8fbe\u5230\u6700\u5927\u957f\u5ea6\u3002\u5982\u679c\u662f\u4ece\u4e0b\u4e00\u4e2a\u6587\u6863\u4e2d\u83b7\u5f97\uff0c\u90a3\u4e48\u52a0\u4e0a\u4e00\u4e2a\u5206\u9694\u7b26\n # document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.\n # \u7ed9\u5b9a\u4e00\u4e2adocument\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u751f\u6210\u4e00\u4e9binstance.\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. 
However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n\n #target_seq_length = max_num_tokens\n #if rng.random() < short_seq_prob:\n # target_seq_length = rng.randint(2, max_num_tokens)\n\n instances = []\n raw_text_list_list=get_raw_instance(document, max_seq_length) # document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.\n for j, raw_text_list in enumerate(raw_text_list_list):\n ####################################################################################################################\n raw_text_list = get_new_segment(raw_text_list) # \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d\n # 1\u3001\u8bbe\u7f6etoken, segment_ids\n is_random_next=True # this will not be used, so it's value doesn't matter\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in raw_text_list:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n ################################################################################################################\n # 2\u3001\u8c03\u7528\u539f\u6709\u7684\u65b9\u6cd5\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n\n return instances","function_tokens":["def","create_instances_from_document","(","# \u65b0\u589e\u7684\u65b9\u6cd5","# \u76ee\u6807\u6309\u7167RoBERTa\u7684\u601d\u8def\uff0c\u4f7f\u7528DOC-SENTENCES\uff0c\u5e76\u4f1a\u53bb\u6389NSP\u4efb\u52a1: \u4ece\u4e00\u4e2a\u6587\u6863\u4e2d\u8fde\u7eed\u7684\u83b7\u5f97\u6587\u672c\uff0c\u76f4\u5230\u8fbe\u5230\u6700\u5927\u957f\u5ea6\u3002\u5982\u679c\u662f\u4ece\u4e0b\u4e00\u4e2a\u6587\u6863\u4e2d\u83b7\u5f97\uff0c\u90a3\u4e48\u52a0\u4e0a\u4e00\u4e2a\u5206\u9694\u7b26","# document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.","# \u7ed9\u5b9a\u4e00\u4e2adocument\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u751f\u6210\u4e00\u4e9binstance.","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. 
However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","#target_seq_length = max_num_tokens","#if rng.random() < short_seq_prob:","# target_seq_length = rng.randint(2, max_num_tokens)","instances","=","[","]","raw_text_list_list","=","get_raw_instance","(","document",",","max_seq_length",")","# document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.","for","j",",","raw_text_list","in","enumerate","(","raw_text_list_list",")",":","####################################################################################################################","raw_text_list","=","get_new_segment","(","raw_text_list",")","# \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d","# 1\u3001\u8bbe\u7f6etoken, segment_ids","is_random_next","=","True","# this will not be used, so it's value doesn't matter","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","raw_text_list",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","################################################################################################################","# 2\u3001\u8c03\u7528\u539f\u6709\u7684\u65b9\u6cd5","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L319-L372"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"create_instances_from_document_original","parameters":"(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return instances","docstring":"Creates `TrainingInstance`s for a single document.","docstring_summary":"Creates `TrainingInstance`s for a single document.","docstring_tokens":["Creates","TrainingInstance","s","for","a","single","document","."],"function":"def create_instances_from_document_original(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # 
computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n print(\"document_index:\",document_index,\"document:\",type(document),\" ;document:\",document) # document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.\n while i < len(document):\n segment = document[i] # \u53d6\u5230\u4e00\u4e2a\u90e8\u5206\uff08\u53ef\u80fd\u662f\u4e00\u6bb5\u8bdd\uff09\n print(\"i:\",i,\" ;segment:\",segment)\n ####################################################################################################################\n segment = get_new_segment(segment) # \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d\n ###################################################################################################################\n current_chunk.append(segment)\n current_length += len(segment)\n print(\"#####condition:\",i == len(document) - 1 or current_length >= target_seq_length)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. 
However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances","function_tokens":["def","create_instances_from_document_original","(","all_documents",",","document_index",",","max_seq_length",",","short_seq_prob",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","document","=","all_documents","[","document_index","]","# Account for [CLS], [SEP], [SEP]","max_num_tokens","=","max_seq_length","-","3","# We *usually* want to fill up the entire sequence since we are padding","# to `max_seq_length` anyways, so short sequences are generally wasted","# computation. However, we *sometimes*","# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter","# sequences to minimize the mismatch between pre-training and fine-tuning.","# The `target_seq_length` is just a rough target however, whereas","# `max_seq_length` is a hard limit.","target_seq_length","=","max_num_tokens","if","rng",".","random","(",")","<","short_seq_prob",":","target_seq_length","=","rng",".","randint","(","2",",","max_num_tokens",")","# We DON'T just concatenate all of the tokens from a document into a long","# sequence and choose an arbitrary split point because this would make the","# next sentence prediction task too easy. 
Instead, we split the input into","# segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user","# input.","instances","=","[","]","current_chunk","=","[","]","current_length","=","0","i","=","0","print","(","\"document_index:\"",",","document_index",",","\"document:\"",",","type","(","document",")",",","\" ;document:\"",",","document",")","# document\u5373\u4e00\u6574\u6bb5\u8bdd\uff0c\u5305\u542b\u591a\u4e2a\u53e5\u5b50\u3002\u6bcf\u4e2a\u53e5\u5b50\u53eb\u505asegment.","while","i","<","len","(","document",")",":","segment","=","document","[","i","]","# \u53d6\u5230\u4e00\u4e2a\u90e8\u5206\uff08\u53ef\u80fd\u662f\u4e00\u6bb5\u8bdd\uff09","print","(","\"i:\"",",","i",",","\" ;segment:\"",",","segment",")","####################################################################################################################","segment","=","get_new_segment","(","segment",")","# \u7ed3\u5408\u5206\u8bcd\u7684\u4e2d\u6587\u7684whole mask\u8bbe\u7f6e\u5373\u5728\u9700\u8981\u7684\u5730\u65b9\u52a0\u4e0a\u201c##\u201d","###################################################################################################################","current_chunk",".","append","(","segment",")","current_length","+=","len","(","segment",")","print","(","\"#####condition:\"",",","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",")","if","i","==","len","(","document",")","-","1","or","current_length",">=","target_seq_length",":","if","current_chunk",":","# `a_end` is how many segments from `current_chunk` go into the `A`","# (first) sentence.","a_end","=","1","if","len","(","current_chunk",")",">=","2",":","a_end","=","rng",".","randint","(","1",",","len","(","current_chunk",")","-","1",")","tokens_a","=","[","]","for","j","in","range","(","a_end",")",":","tokens_a",".","extend","(","current_chunk","[","j","]",")","tokens_b","=","[","]","# Random next","is_random_next","=","False","if","len","(","current_chunk",")","==","1","or","rng",".","random","(",")","<","0.5",":","is_random_next","=","True","target_b_length","=","target_seq_length","-","len","(","tokens_a",")","# This should rarely go for more than one iteration for large","# corpora. 
However, just to be careful, we try to make sure that","# the random document is not the same as the document","# we're processing.","for","_","in","range","(","10",")",":","random_document_index","=","rng",".","randint","(","0",",","len","(","all_documents",")","-","1",")","if","random_document_index","!=","document_index",":","break","random_document","=","all_documents","[","random_document_index","]","random_start","=","rng",".","randint","(","0",",","len","(","random_document",")","-","1",")","for","j","in","range","(","random_start",",","len","(","random_document",")",")",":","tokens_b",".","extend","(","random_document","[","j","]",")","if","len","(","tokens_b",")",">=","target_b_length",":","break","# We didn't actually use these segments so we \"put them back\" so","# they don't go to waste.","num_unused_segments","=","len","(","current_chunk",")","-","a_end","i","-=","num_unused_segments","# Actual next","else",":","is_random_next","=","False","for","j","in","range","(","a_end",",","len","(","current_chunk",")",")",":","tokens_b",".","extend","(","current_chunk","[","j","]",")","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")","assert","len","(","tokens_a",")",">=","1","assert","len","(","tokens_b",")",">=","1","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","(","tokens",",","masked_lm_positions",",","masked_lm_labels",")","=","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")","instance","=","TrainingInstance","(","tokens","=","tokens",",","segment_ids","=","segment_ids",",","is_random_next","=","is_random_next",",","masked_lm_positions","=","masked_lm_positions",",","masked_lm_labels","=","masked_lm_labels",")","instances",".","append","(","instance",")","current_chunk","=","[","]","current_length","=","0","i","+=","1","return","instances"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L376-L494"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"create_masked_lm_predictions","parameters":"(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng)","argument_list":"","return_statement":"return (output_tokens, masked_lm_positions, masked_lm_labels)","docstring":"Creates the predictions for the masked LM objective.","docstring_summary":"Creates the predictions for the masked LM objective.","docstring_tokens":["Creates","the","predictions","for","the","masked","LM","objective","."],"function":"def create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the 
wordpieces\n # corresponding to an original word. When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = [t[2:] if len(re.findall('##[\\u4E00-\\u9FA5]', t))>0 else t for t in tokens] # \u53bb\u6389\"##\"\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index][2:] if len(re.findall('##[\\u4E00-\\u9FA5]', tokens[index]))>0 else tokens[index] # \u53bb\u6389\"##\"\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n # tf.logging.info('%s' % (tokens))\n # tf.logging.info('%s' % (output_tokens))\n return (output_tokens, masked_lm_positions, masked_lm_labels)","function_tokens":["def","create_masked_lm_predictions","(","tokens",",","masked_lm_prob",",","max_predictions_per_seq",",","vocab_words",",","rng",")",":","cand_indexes","=","[","]","for","(","i",",","token",")","in","enumerate","(","tokens",")",":","if","token","==","\"[CLS]\"","or","token","==","\"[SEP]\"",":","continue","# Whole Word Masking means that if we mask all of the wordpieces","# corresponding to an original word. When a word has been split into","# WordPieces, the first token does not have any marker and any subsequence","# tokens are prefixed with ##. 
So whenever we see the ## token, we","# append it to the previous set of word indexes.","#","# Note that Whole Word Masking does *not* change the training code","# at all -- we still predict each WordPiece independently, softmaxed","# over the entire vocabulary.","if","(","FLAGS",".","do_whole_word_mask","and","len","(","cand_indexes",")",">=","1","and","token",".","startswith","(","\"##\"",")",")",":","cand_indexes","[","-","1","]",".","append","(","i",")","else",":","cand_indexes",".","append","(","[","i","]",")","rng",".","shuffle","(","cand_indexes",")","output_tokens","=","[","t","[","2",":","]","if","len","(","re",".","findall","(","'##[\\u4E00-\\u9FA5]'",",","t",")",")",">","0","else","t","for","t","in","tokens","]","# \u53bb\u6389\"##\"","num_to_predict","=","min","(","max_predictions_per_seq",",","max","(","1",",","int","(","round","(","len","(","tokens",")","*","masked_lm_prob",")",")",")",")","masked_lms","=","[","]","covered_indexes","=","set","(",")","for","index_set","in","cand_indexes",":","if","len","(","masked_lms",")",">=","num_to_predict",":","break","# If adding a whole-word mask would exceed the maximum number of","# predictions, then just skip this candidate.","if","len","(","masked_lms",")","+","len","(","index_set",")",">","num_to_predict",":","continue","is_any_index_covered","=","False","for","index","in","index_set",":","if","index","in","covered_indexes",":","is_any_index_covered","=","True","break","if","is_any_index_covered",":","continue","for","index","in","index_set",":","covered_indexes",".","add","(","index",")","masked_token","=","None","# 80% of the time, replace with [MASK]","if","rng",".","random","(",")","<","0.8",":","masked_token","=","\"[MASK]\"","else",":","# 10% of the time, keep original","if","rng",".","random","(",")","<","0.5",":","masked_token","=","tokens","[","index","]","[","2",":","]","if","len","(","re",".","findall","(","'##[\\u4E00-\\u9FA5]'",",","tokens","[","index","]",")",")",">","0","else","tokens","[","index","]","# \u53bb\u6389\"##\"","# 10% of the time, replace with random word","else",":","masked_token","=","vocab_words","[","rng",".","randint","(","0",",","len","(","vocab_words",")","-","1",")","]","output_tokens","[","index","]","=","masked_token","masked_lms",".","append","(","MaskedLmInstance","(","index","=","index",",","label","=","tokens","[","index","]",")",")","assert","len","(","masked_lms",")","<=","num_to_predict","masked_lms","=","sorted","(","masked_lms",",","key","=","lambda","x",":","x",".","index",")","masked_lm_positions","=","[","]","masked_lm_labels","=","[","]","for","p","in","masked_lms",":","masked_lm_positions",".","append","(","p",".","index",")","masked_lm_labels",".","append","(","p",".","label",")","# tf.logging.info('%s' % (tokens))","# tf.logging.info('%s' % (output_tokens))","return","(","output_tokens",",","masked_lm_positions",",","masked_lm_labels",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L501-L576"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py","language":"python","identifier":"truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_num_tokens, rng)","argument_list":"","return_statement":"","docstring":"Truncates a pair of sequences to a maximum sequence length.","docstring_summary":"Truncates a pair of sequences to a maximum sequence 
length.","docstring_tokens":["Truncates","a","pair","of","sequences","to","a","maximum","sequence","length","."],"function":"def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()","function_tokens":["def","truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_num_tokens",",","rng",")",":","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_num_tokens",":","break","trunc_tokens","=","tokens_a","if","len","(","tokens_a",")",">","len","(","tokens_b",")","else","tokens_b","assert","len","(","trunc_tokens",")",">=","1","# We want to sometimes truncate from the front and sometimes from the","# back to add more randomness and avoid biases.","if","rng",".","random","(",")","<","0.5",":","del","trunc_tokens","[","0","]","else",":","trunc_tokens",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models\/albert\/resources\/create_pretraining_data_roberta.py#L579-L594"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * 0.5 * (1.0 + torch.erf(x \/ math.sqrt(2.0)))","docstring":"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))","docstring_summary":"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))","docstring_tokens":["Implementation","of","the","gelu","activation","function",".","For","information",":","OpenAI","GPT","s","gelu","is","slightly","different","(","and","gives","slightly","different","results",")",":","0",".","5","*","x","*","(","1","+","torch",".","tanh","(","math",".","sqrt","(","2","\/","math",".","pi",")","*","(","x","+","0",".","044715","*","torch",".","pow","(","x","3","))))"],"function":"def gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x \/ math.sqrt(2.0)))","function_tokens":["def","gelu","(","x",")",":","return","x","*","0.5","*","(","1.0","+","torch",".","erf","(","x","\/","math",".","sqrt","(","2.0",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L49-L54"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"BertConfig.__init__","parameters":"(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n if isinstance(vocab_size_or_config_json_file, str):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\")","function_tokens":["def","__init__","(","self",",","vocab_size_or_config_json_file",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","2",",","initializer_range","=","0.02",")",":","if","isinstance","(","vocab_size_or_config_json_file",",","str",")",":","with","open","(","vocab_size_or_config_json_file",",","\"r\"",",","encoding","=","'utf-8'",")","as","reader",":","json_config","=","json",".","loads","(","reader",".","read","(",")",")","for","key",",","value","in","json_config",".","items","(",")",":","self",".","__dict__","[","key","]","=","value","elif","isinstance","(","vocab_size_or_config_json_file",",","int",")",":","self",".","vocab_size","=","vocab_size_or_config_json_file","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range","else",":","raise","ValueError","(","\"First argument must be either a vocabulary size (int)\"","\"or the path to a pretrained model config file (str)\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L68-L123"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"BertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of 
parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size_or_config_json_file","=","-","1",")","for","key",",","value","in","json_object",".","items","(",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L126-L131"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"BertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","open","(","json_file",",","\"r\"",",","encoding","=","'utf-8'",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L134-L138"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"BertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L143-L146"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"BertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON 
string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L148-L150"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"ALBertConfig.__init__","parameters":"(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n embedding_size=128,\n ln_type=\"postln\",\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs BertConfig.","docstring_tokens":["Constructs","BertConfig","."],"function":"def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n embedding_size=128,\n ln_type=\"postln\",\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. 
If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n if isinstance(vocab_size_or_config_json_file, str):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.embedding_size = embedding_size\n self.ln_type = ln_type\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\")","function_tokens":["def","__init__","(","self",",","vocab_size_or_config_json_file",",","hidden_size","=","768",",","num_hidden_layers","=","12",",","num_attention_heads","=","12",",","intermediate_size","=","3072",",","embedding_size","=","128",",","ln_type","=","\"postln\"",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0.1",",","attention_probs_dropout_prob","=","0.1",",","max_position_embeddings","=","512",",","type_vocab_size","=","2",",","initializer_range","=","0.02",")",":","if","isinstance","(","vocab_size_or_config_json_file",",","str",")",":","with","open","(","vocab_size_or_config_json_file",",","\"r\"",",","encoding","=","'utf-8'",")","as","reader",":","json_config","=","json",".","loads","(","reader",".","read","(",")",")","for","key",",","value","in","json_config",".","items","(",")",":","self",".","__dict__","[","key","]","=","value","elif","isinstance","(","vocab_size_or_config_json_file",",","int",")",":","self",".","vocab_size","=","vocab_size_or_config_json_file","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_attention_heads","=","num_attention_heads","self",".","embedding_size","=","embedding_size","self",".","ln_type","=","ln_type","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range","else",":","raise","ValueError","(","\"First argument must be either a vocabulary size (int)\"","\"or the path to a pretrained model config file 
(str)\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L157-L216"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"ALBertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `BertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","BertConfig","(","vocab_size_or_config_json_file","=","-","1",")","for","key",",","value","in","json_object",".","items","(",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L219-L224"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"ALBertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `BertConfig` from a json file of parameters.","docstring_summary":"Constructs a `BertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","BertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","open","(","json_file",",","\"r\"",",","encoding","=","'utf-8'",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L227-L231"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"ALBertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return 
output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L236-L239"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"ALBertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L241-L243"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"PreTrainedBertModel.init_bert_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()","function_tokens":["def","init_bert_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf 
https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","elif","isinstance","(","module",",","BertLayerNorm",")",":","module",".","bias",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","nn",".","Linear",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L615-L626"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py","language":"python","identifier":"PreTrainedBertModel.from_pretrained","parameters":"(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs)","argument_list":"","return_statement":"return model","docstring":"Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.\n Download and cache the pre-trained model file if needed.\n\n Params:\n pretrained_model_name: either:\n - a str with the name of a pre-trained model to load selected in the list of:\n . `bert-base-uncased`\n . `bert-large-uncased`\n . `bert-base-cased`\n . `bert-base-multilingual`\n . `bert-base-chinese`\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance\n cache_dir: an optional path to a folder in which the pre-trained models will be cached.\n state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models\n *inputs, **kwargs: additional input for the specific Bert class\n (ex: num_labels for BertForSequenceClassification)","docstring_summary":"Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.\n Download and cache the pre-trained model file if needed.","docstring_tokens":["Instantiate","a","PreTrainedBertModel","from","a","pre","-","trained","model","file","or","a","pytorch","state","dict",".","Download","and","cache","the","pre","-","trained","model","file","if","needed","."],"function":"def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):\n \"\"\"\n Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.\n Download and cache the pre-trained model file if needed.\n\n Params:\n pretrained_model_name: either:\n - a str with the name of a pre-trained model to load selected in the list of:\n . `bert-base-uncased`\n . `bert-large-uncased`\n . `bert-base-cased`\n . `bert-base-multilingual`\n . `bert-base-chinese`\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . 
`pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance\n cache_dir: an optional path to a folder in which the pre-trained models will be cached.\n state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models\n *inputs, **kwargs: additional input for the specific Bert class\n (ex: num_labels for BertForSequenceClassification)\n \"\"\"\n if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:\n archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]\n else:\n archive_file = pretrained_model_name\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except FileNotFoundError:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name,\n ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),\n archive_file))\n return None\n if resolved_archive_file == archive_file:\n logger.info(\"loading archive file {}\".format(archive_file))\n else:\n logger.info(\"loading archive file {} from cache at {}\".format(\n archive_file, resolved_archive_file))\n tempdir = None\n if os.path.isdir(resolved_archive_file):\n serialization_dir = resolved_archive_file\n else:\n # Extract archive to temp dir\n tempdir = tempfile.mkdtemp()\n logger.info(\"extracting archive file {} to temp dir {}\".format(\n resolved_archive_file, tempdir))\n with tarfile.open(resolved_archive_file, 'r:gz') as archive:\n archive.extractall(tempdir)\n serialization_dir = tempdir\n # Load config\n config_file = os.path.join(serialization_dir, CONFIG_NAME)\n config = BertConfig.from_json_file(config_file)\n logger.info(\"Model config {}\".format(config))\n # Instantiate model.\n model = cls(config, *inputs, **kwargs)\n if state_dict is None:\n weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)\n state_dict = torch.load(weights_path)\n\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if 'gamma' in key:\n new_key = key.replace('gamma', 'weight')\n if 'beta' in key:\n new_key = key.replace('beta', 'bias')\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n\n load(model, prefix='' if hasattr(model, 'bert') else 'bert.')\n if len(missing_keys) > 0:\n logger.info(\"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys))\n if len(unexpected_keys) > 0:\n logger.info(\"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys))\n if tempdir:\n # Clean up temp dir\n shutil.rmtree(tempdir)\n return 
model","function_tokens":["def","from_pretrained","(","cls",",","pretrained_model_name",",","state_dict","=","None",",","cache_dir","=","None",",","*","inputs",",","*","*","kwargs",")",":","if","pretrained_model_name","in","PRETRAINED_MODEL_ARCHIVE_MAP",":","archive_file","=","PRETRAINED_MODEL_ARCHIVE_MAP","[","pretrained_model_name","]","else",":","archive_file","=","pretrained_model_name","# redirect to the cache, if necessary","try",":","resolved_archive_file","=","cached_path","(","archive_file",",","cache_dir","=","cache_dir",")","except","FileNotFoundError",":","logger",".","error","(","\"Model name '{}' was not found in model name list ({}). \"","\"We assumed '{}' was a path or url but couldn't find any file \"","\"associated to this path or url.\"",".","format","(","pretrained_model_name",",","', '",".","join","(","PRETRAINED_MODEL_ARCHIVE_MAP",".","keys","(",")",")",",","archive_file",")",")","return","None","if","resolved_archive_file","==","archive_file",":","logger",".","info","(","\"loading archive file {}\"",".","format","(","archive_file",")",")","else",":","logger",".","info","(","\"loading archive file {} from cache at {}\"",".","format","(","archive_file",",","resolved_archive_file",")",")","tempdir","=","None","if","os",".","path",".","isdir","(","resolved_archive_file",")",":","serialization_dir","=","resolved_archive_file","else",":","# Extract archive to temp dir","tempdir","=","tempfile",".","mkdtemp","(",")","logger",".","info","(","\"extracting archive file {} to temp dir {}\"",".","format","(","resolved_archive_file",",","tempdir",")",")","with","tarfile",".","open","(","resolved_archive_file",",","'r:gz'",")","as","archive",":","archive",".","extractall","(","tempdir",")","serialization_dir","=","tempdir","# Load config","config_file","=","os",".","path",".","join","(","serialization_dir",",","CONFIG_NAME",")","config","=","BertConfig",".","from_json_file","(","config_file",")","logger",".","info","(","\"Model config {}\"",".","format","(","config",")",")","# Instantiate model.","model","=","cls","(","config",",","*","inputs",",","*","*","kwargs",")","if","state_dict","is","None",":","weights_path","=","os",".","path",".","join","(","serialization_dir",",","WEIGHTS_NAME",")","state_dict","=","torch",".","load","(","weights_path",")","old_keys","=","[","]","new_keys","=","[","]","for","key","in","state_dict",".","keys","(",")",":","new_key","=","None","if","'gamma'","in","key",":","new_key","=","key",".","replace","(","'gamma'",",","'weight'",")","if","'beta'","in","key",":","new_key","=","key",".","replace","(","'beta'",",","'bias'",")","if","new_key",":","old_keys",".","append","(","key",")","new_keys",".","append","(","new_key",")","for","old_key",",","new_key","in","zip","(","old_keys",",","new_keys",")",":","state_dict","[","new_key","]","=","state_dict",".","pop","(","old_key",")","missing_keys","=","[","]","unexpected_keys","=","[","]","error_msgs","=","[","]","# copy state_dict so _load_from_state_dict can modify 
it","metadata","=","getattr","(","state_dict",",","'_metadata'",",","None",")","state_dict","=","state_dict",".","copy","(",")","if","metadata","is","not","None",":","state_dict",".","_metadata","=","metadata","def","load","(","module",",","prefix","=","''",")",":","local_metadata","=","{","}","if","metadata","is","None","else","metadata",".","get","(","prefix","[",":","-","1","]",",","{","}",")","module",".","_load_from_state_dict","(","state_dict",",","prefix",",","local_metadata",",","True",",","missing_keys",",","unexpected_keys",",","error_msgs",")","for","name",",","child","in","module",".","_modules",".","items","(",")",":","if","child","is","not","None",":","load","(","child",",","prefix","+","name","+","'.'",")","load","(","model",",","prefix","=","''","if","hasattr","(","model",",","'bert'",")","else","'bert.'",")","if","len","(","missing_keys",")",">","0",":","logger",".","info","(","\"Weights of {} not initialized from pretrained model: {}\"",".","format","(","model",".","__class__",".","__name__",",","missing_keys",")",")","if","len","(","unexpected_keys",")",">","0",":","logger",".","info","(","\"Weights from pretrained model not used in {}: {}\"",".","format","(","model",".","__class__",".","__name__",",","unexpected_keys",")",")","if","tempdir",":","# Clean up temp dir","shutil",".","rmtree","(","tempdir",")","return","model"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/pytorch_modeling.py#L629-L733"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * 0.5 * (1.0 + torch.erf(x \/ math.sqrt(2.0)))","docstring":"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))","docstring_summary":"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))","docstring_tokens":["Implementation","of","the","gelu","activation","function",".","For","information",":","OpenAI","GPT","s","gelu","is","slightly","different","(","and","gives","slightly","different","results",")",":","0",".","5","*","x","*","(","1","+","torch",".","tanh","(","math",".","sqrt","(","2","\/","math",".","pi",")","*","(","x","+","0",".","044715","*","torch",".","pow","(","x","3","))))"],"function":"def gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x \/ math.sqrt(2.0)))","function_tokens":["def","gelu","(","x",")",":","return","x","*","0.5","*","(","1.0","+","torch",".","erf","(","x","\/","math",".","sqrt","(","2.0",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L32-L37"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"AlbertConfig.__init__","parameters":"(self,\n vocab_size,\n embedding_size=128,\n hidden_size=4096,\n num_hidden_layers=12,\n num_hidden_groups=1,\n num_attention_heads=64,\n intermediate_size=16384,\n inner_group_num=1,\n down_scale_factor=1,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0,\n attention_probs_dropout_prob=0,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02)","argument_list":"","return_statement":"","docstring":"Constructs AlbertConfig.\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.\n embedding_size: size of voc embeddings.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_hidden_groups: Number of group for the hidden layers, parameters in\n the same group are shared.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n inner_group_num: int, number of inner repetition of attention and ffn.\n down_scale_factor: float, the scale to apply\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `AlbertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs AlbertConfig.\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.\n embedding_size: size of voc embeddings.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_hidden_groups: Number of group for the hidden layers, parameters in\n the same group are shared.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n inner_group_num: int, number of inner repetition of attention and ffn.\n down_scale_factor: float, the scale to apply\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `AlbertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_tokens":["Constructs","AlbertConfig",".","Args",":","vocab_size",":","Vocabulary","size","of","inputs_ids","in","AlbertModel",".","embedding_size",":","size","of","voc","embeddings",".","hidden_size",":","Size","of","the","encoder","layers","and","the","pooler","layer",".","num_hidden_layers",":","Number","of","hidden","layers","in","the","Transformer","encoder",".","num_hidden_groups",":","Number","of","group","for","the","hidden","layers","parameters","in","the","same","group","are","shared",".","num_attention_heads",":","Number","of","attention","heads","for","each","attention","layer","in","the","Transformer","encoder",".","intermediate_size",":","The","size","of","the","intermediate","(","i",".","e",".","feed","-","forward",")","layer","in","the","Transformer","encoder",".","inner_group_num",":","int","number","of","inner","repetition","of","attention","and","ffn",".","down_scale_factor",":","float","the","scale","to","apply","hidden_act",":","The","non","-","linear","activation","function","(","function","or","string",")","in","the","encoder","and","pooler",".","hidden_dropout_prob",":","The","dropout","probability","for","all","fully","connected","layers","in","the","embeddings","encoder","and","pooler",".","attention_probs_dropout_prob",":","The","dropout","ratio","for","the","attention","probabilities",".","max_position_embeddings",":","The","maximum","sequence","length","that","this","model","might","ever","be","used","with",".","Typically","set","this","to","something","large","just","in","case","(","e",".","g",".","512","or","1024","or","2048",")",".","type_vocab_size",":","The","vocabulary","size","of","the","token_type_ids","passed","into","AlbertModel",".","initializer_range",":","The","stdev","of","the","truncated_normal_initializer","for","initializing","all","weight","matrices","."],"function":"def __init__(self,\n vocab_size,\n embedding_size=128,\n hidden_size=4096,\n num_hidden_layers=12,\n num_hidden_groups=1,\n num_attention_heads=64,\n intermediate_size=16384,\n inner_group_num=1,\n down_scale_factor=1,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0,\n attention_probs_dropout_prob=0,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02):\n \"\"\"Constructs AlbertConfig.\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.\n embedding_size: size of voc embeddings.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_hidden_groups: Number of group for the hidden layers, parameters in\n the same group are shared.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n inner_group_num: int, number of inner repetition of attention and ffn.\n down_scale_factor: float, the scale to apply\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n 
max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `AlbertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_hidden_groups = num_hidden_groups\n self.num_attention_heads = num_attention_heads\n self.inner_group_num = inner_group_num\n self.down_scale_factor = down_scale_factor\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range","function_tokens":["def","__init__","(","self",",","vocab_size",",","embedding_size","=","128",",","hidden_size","=","4096",",","num_hidden_layers","=","12",",","num_hidden_groups","=","1",",","num_attention_heads","=","64",",","intermediate_size","=","16384",",","inner_group_num","=","1",",","down_scale_factor","=","1",",","hidden_act","=","\"gelu\"",",","hidden_dropout_prob","=","0",",","attention_probs_dropout_prob","=","0",",","max_position_embeddings","=","512",",","type_vocab_size","=","2",",","initializer_range","=","0.02",")",":","self",".","vocab_size","=","vocab_size","self",".","embedding_size","=","embedding_size","self",".","hidden_size","=","hidden_size","self",".","num_hidden_layers","=","num_hidden_layers","self",".","num_hidden_groups","=","num_hidden_groups","self",".","num_attention_heads","=","num_attention_heads","self",".","inner_group_num","=","inner_group_num","self",".","down_scale_factor","=","down_scale_factor","self",".","hidden_act","=","hidden_act","self",".","intermediate_size","=","intermediate_size","self",".","hidden_dropout_prob","=","hidden_dropout_prob","self",".","attention_probs_dropout_prob","=","attention_probs_dropout_prob","self",".","max_position_embeddings","=","max_position_embeddings","self",".","type_vocab_size","=","type_vocab_size","self",".","initializer_range","=","initializer_range"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L56-L114"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"AlbertConfig.from_dict","parameters":"(cls, json_object)","argument_list":"","return_statement":"return config","docstring":"Constructs a `AlbertConfig` from a Python dictionary of parameters.","docstring_summary":"Constructs a `AlbertConfig` from a Python dictionary of parameters.","docstring_tokens":["Constructs","a","AlbertConfig","from","a","Python","dictionary","of","parameters","."],"function":"def from_dict(cls, json_object):\n \"\"\"Constructs a `AlbertConfig` from a Python dictionary of parameters.\"\"\"\n config = AlbertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return 
config","function_tokens":["def","from_dict","(","cls",",","json_object",")",":","config","=","AlbertConfig","(","vocab_size","=","None",")","for","(","key",",","value",")","in","six",".","iteritems","(","json_object",")",":","config",".","__dict__","[","key","]","=","value","return","config"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L117-L122"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"AlbertConfig.from_json_file","parameters":"(cls, json_file)","argument_list":"","return_statement":"return cls.from_dict(json.loads(text))","docstring":"Constructs a `AlbertConfig` from a json file of parameters.","docstring_summary":"Constructs a `AlbertConfig` from a json file of parameters.","docstring_tokens":["Constructs","a","AlbertConfig","from","a","json","file","of","parameters","."],"function":"def from_json_file(cls, json_file):\n \"\"\"Constructs a `AlbertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))","function_tokens":["def","from_json_file","(","cls",",","json_file",")",":","with","open","(","json_file",",","\"r\"",")","as","reader",":","text","=","reader",".","read","(",")","return","cls",".","from_dict","(","json",".","loads","(","text",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L125-L129"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"AlbertConfig.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L131-L134"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"AlbertConfig.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + 
\"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L136-L138"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"AlbertModel.init_bert_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()","function_tokens":["def","init_bert_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","elif","isinstance","(","module",",","BertLayerNorm",")",":","module",".","bias",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","nn",".","Linear",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L362-L373"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"AlbertForMRC.init_bert_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)\n module.weight.data.normal_(mean=0.0, 
std=self.config.initializer_range)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()","function_tokens":["def","init_bert_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","elif","isinstance","(","module",",","BertLayerNorm",")",":","module",".","bias",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","nn",".","Linear",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L434-L445"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py","language":"python","identifier":"AlbertForMultipleChoice.init_bert_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()","function_tokens":["def","init_bert_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","elif","isinstance","(","module",",","BertLayerNorm",")",":","module",".","bias",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","nn",".","Linear",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/google_albert_pytorch_modeling.py#L483-L494"} 
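The config records above (BertConfig, ALBertConfig, AlbertConfig) all share one serialization pattern: `from_dict` pours raw key/value pairs into `__dict__`, `to_dict` deep-copies them back out, and `to_json_string` dumps the result. A minimal self-contained sketch of that round trip, using a toy class so it runs without the CLUE checkout (TinyConfig and its two fields are illustrative, not from the source):

import copy
import json

class TinyConfig(object):
    """Toy stand-in for the config classes above; same serialization pattern."""

    def __init__(self, vocab_size, hidden_size=312):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    @classmethod
    def from_dict(cls, json_object):
        # Mirror of the records: start from a dummy instance, overwrite __dict__.
        config = cls(vocab_size=None)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

config = TinyConfig(vocab_size=21128)  # 21128: vocab size of the Chinese BERT models
restored = TinyConfig.from_dict(json.loads(config.to_json_string()))
assert restored.to_dict() == config.to_dict()

The `init_bert_weights` records interleaved above are, following this codebase's pattern, applied recursively via `self.apply(self.init_bert_weights)`; note that they draw BertLayerNorm's weight and bias from the same normal distribution rather than the conventional ones/zeros initialization.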
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, label_list, max_seq_length, tokenizer)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of `InputBatch`s.","docstring_summary":"Loads a data file into a list of `InputBatch`s.","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputBatch","s","."],"function":"def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n print(\"#examples\", len(examples))\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = [[]]\n for (ex_index, example) in enumerate(tqdm(examples)):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = tokenizer.tokenize(example.text_b)\n\n tokens_c = tokenizer.tokenize(example.text_c)\n\n _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)\n tokens_b = tokens_c + [\"[SEP]\"] + tokens_b\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features[-1].append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n if len(features[-1]) == n_class:\n features.append([])\n\n if len(features[-1]) == 0:\n features = features[:-1]\n print('#features', len(features))\n return 
features","function_tokens":["def","convert_examples_to_features","(","examples",",","label_list",",","max_seq_length",",","tokenizer",")",":","print","(","\"#examples\"",",","len","(","examples",")",")","label_map","=","{","}","for","(","i",",","label",")","in","enumerate","(","label_list",")",":","label_map","[","label","]","=","i","features","=","[","[","]","]","for","(","ex_index",",","example",")","in","enumerate","(","tqdm","(","examples",")",")",":","tokens_a","=","tokenizer",".","tokenize","(","example",".","text_a",")","tokens_b","=","tokenizer",".","tokenize","(","example",".","text_b",")","tokens_c","=","tokenizer",".","tokenize","(","example",".","text_c",")","_truncate_seq_tuple","(","tokens_a",",","tokens_b",",","tokens_c",",","max_seq_length","-","4",")","tokens_b","=","tokens_c","+","[","\"[SEP]\"","]","+","tokens_b","tokens","=","[","]","segment_ids","=","[","]","tokens",".","append","(","\"[CLS]\"",")","segment_ids",".","append","(","0",")","for","token","in","tokens_a",":","tokens",".","append","(","token",")","segment_ids",".","append","(","0",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","0",")","if","tokens_b",":","for","token","in","tokens_b",":","tokens",".","append","(","token",")","segment_ids",".","append","(","1",")","tokens",".","append","(","\"[SEP]\"",")","segment_ids",".","append","(","1",")","input_ids","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","# The mask has 1 for real tokens and 0 for padding tokens. Only real","# tokens are attended to.","input_mask","=","[","1","]","*","len","(","input_ids",")","# Zero-pad up to the sequence length.","while","len","(","input_ids",")","<","max_seq_length",":","input_ids",".","append","(","0",")","input_mask",".","append","(","0",")","segment_ids",".","append","(","0",")","assert","len","(","input_ids",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_ids",")","==","max_seq_length","label_id","=","label_map","[","example",".","label","]","if","ex_index","<","5",":","logger",".","info","(","\"*** Example ***\"",")","logger",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","logger",".","info","(","\"tokens: %s\"","%","\" \"",".","join","(","[","tokenization",".","printable_text","(","x",")","for","x","in","tokens","]",")",")","logger",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","logger",".","info","(","\"input_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_mask","]",")",")","logger",".","info","(","\"segment_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","segment_ids","]",")",")","logger",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label_id",")",")","features","[","-","1","]",".","append","(","InputFeatures","(","input_ids","=","input_ids",",","input_mask","=","input_mask",",","segment_ids","=","segment_ids",",","label_id","=","label_id",")",")","if","len","(","features","[","-","1","]",")","==","n_class",":","features",".","append","(","[","]",")","if","len","(","features","[","-","1","]",")","==","0",":","features","=","features","[",":","-","1","]","print","(","'#features'",",","len","(","features",")",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L175-L252"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"_truncate_seq_pair","parameters":"(tokens_a, tokens_b, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence pair in place to the maximum length.","docstring_summary":"Truncates a sequence pair in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","pair","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()","function_tokens":["def","_truncate_seq_pair","(","tokens_a",",","tokens_b",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">","len","(","tokens_b",")",":","tokens_a",".","pop","(",")","else",":","tokens_b",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L255-L269"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"_truncate_seq_tuple","parameters":"(tokens_a, tokens_b, tokens_c, max_length)","argument_list":"","return_statement":"","docstring":"Truncates a sequence tuple in place to the maximum length.","docstring_summary":"Truncates a sequence tuple in place to the maximum length.","docstring_tokens":["Truncates","a","sequence","tuple","in","place","to","the","maximum","length","."],"function":"def _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_length):\n \"\"\"Truncates a sequence tuple in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)\n if total_length <= max_length:\n break\n if len(tokens_a) >= len(tokens_b) and len(tokens_a) >= len(tokens_c):\n tokens_a.pop()\n elif len(tokens_b) >= len(tokens_a) and len(tokens_b) >= len(tokens_c):\n tokens_b.pop()\n else:\n tokens_c.pop()","function_tokens":["def","_truncate_seq_tuple","(","tokens_a",",","tokens_b",",","tokens_c",",","max_length",")",":","# This is a simple heuristic which will always truncate the longer sequence","# one token at a time. This makes more sense than truncating an equal percent","# of tokens from each, since if one sequence is very short then each token","# that's truncated likely contains more information than a longer sequence.","while","True",":","total_length","=","len","(","tokens_a",")","+","len","(","tokens_b",")","+","len","(","tokens_c",")","if","total_length","<=","max_length",":","break","if","len","(","tokens_a",")",">=","len","(","tokens_b",")","and","len","(","tokens_a",")",">=","len","(","tokens_c",")",":","tokens_a",".","pop","(",")","elif","len","(","tokens_b",")",">=","len","(","tokens_a",")","and","len","(","tokens_b",")",">=","len","(","tokens_c",")",":","tokens_b",".","pop","(",")","else",":","tokens_c",".","pop","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L272-L288"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"InputExample.__init__","parameters":"(self, guid, text_a, text_b=None, label=None, text_c=None)","argument_list":"","return_statement":"","docstring":"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.","docstring_summary":"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.","docstring_tokens":["Constructs","a","InputExample",".","Args",":","guid",":","Unique","id","for","the","example",".","text_a",":","string",".","The","untokenized","text","of","the","first","sequence",".","For","single","sequence","tasks","only","this","sequence","must","be","specified",".","text_b",":","(","Optional",")","string",".","The","untokenized","text","of","the","second","sequence",".","Only","must","be","specified","for","sequence","pair","tasks",".","label",":","(","Optional",")","string",".","The","label","of","the","example",".","This","should","be","specified","for","train","and","dev","examples","but","not","for","test","examples","."],"function":"def __init__(self, guid, text_a, text_b=None, label=None, text_c=None):\n \"\"\"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.text_c = text_c\n self.label = label","function_tokens":["def","__init__","(","self",",","guid",",","text_a",",","text_b","=","None",",","label","=","None",",","text_c","=","None",")",":","self",".","guid","=","guid","self",".","text_a","=","text_a","self",".","text_b","=","text_b","self",".","text_c","=","text_c","self",".","label","=","label"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L53-L68"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L84-L86"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise 
NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L88-L90"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L92-L94"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"DataProcessor._read_tsv","parameters":"(cls, input_file, quotechar=None)","argument_list":"","return_statement":"","docstring":"Reads a tab separated value file.","docstring_summary":"Reads a tab separated value file.","docstring_tokens":["Reads","a","tab","separated","value","file","."],"function":"def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines","function_tokens":["def","_read_tsv","(","cls",",","input_file",",","quotechar","=","None",")",":","with","open","(","input_file",",","\"r\"",")","as","f",":","reader","=","csv",".","reader","(","f",",","delimiter","=","\"\\t\"",",","quotechar","=","quotechar",")","lines","=","[","]","for","line","in","reader",":","lines",".","append","(","line",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L97-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"c3Processor.get_train_examples","parameters":"(self)","argument_list":"","return_statement":"return self._create_examples(self.D[0], \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self.D[0], \"train\")","function_tokens":["def","get_train_examples","(","self",")",":","return","self",".","_create_examples","(","self",".","D","[","0","]",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L130-L132"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"c3Processor.get_test_examples","parameters":"(self)","argument_list":"","return_statement":"return 
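The DataProcessor._read_tsv record above reads a tab-separated file with the csv module; note that it opens the file without an explicit encoding. A hedged sketch of the same idea that pins the encoding (the utf-8 and newline arguments are an assumption added here, not present in the original):

import csv

def read_tsv(input_file, quotechar=None):
    # Explicit utf-8 avoids locale-dependent decoding; newline="" lets the
    # csv module manage line endings itself, as the csv docs recommend.
    with open(input_file, "r", encoding="utf-8", newline="") as f:
        return list(csv.reader(f, delimiter="\t", quotechar=quotechar))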
self._create_examples(self.D[2], \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self.D[2], \"test\")","function_tokens":["def","get_test_examples","(","self",")",":","return","self",".","_create_examples","(","self",".","D","[","2","]",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L134-L136"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"c3Processor.get_dev_examples","parameters":"(self)","argument_list":"","return_statement":"return self._create_examples(self.D[1], \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self.D[1], \"dev\")","function_tokens":["def","get_dev_examples","(","self",")",":","return","self",".","_create_examples","(","self",".","D","[","1","]",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L138-L140"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"c3Processor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"0\", \"1\", \"2\", \"3\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\", \"2\", \"3\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"0\"",",","\"1\"",",","\"2\"",",","\"3\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L142-L144"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/run_c3.py","language":"python","identifier":"c3Processor._create_examples","parameters":"(self, data, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, data, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n cache_dir = os.path.join(self.data_dir, set_type + '_examples.pkl')\n if os.path.exists(cache_dir):\n examples = pickle.load(open(cache_dir, 'rb'))\n else:\n examples = []\n for (i, d) in enumerate(data):\n answer = -1\n # \u8fd9\u91ccdata[i]\u67096\u4e2a\u5143\u7d20\uff0c0\u662fcontext\uff0c1\u662f\u95ee\u9898\uff0c2~5\u662fchoice\uff0c6\u662f\u7b54\u6848\n for k in range(4):\n if data[i][2 + k] == data[i][6]:\n answer = str(k)\n\n label = tokenization.convert_to_unicode(answer)\n\n for k in range(4):\n guid = \"%s-%s-%s\" % (set_type, i, k)\n text_a = tokenization.convert_to_unicode(data[i][0])\n text_b = tokenization.convert_to_unicode(data[i][k + 2])\n text_c = 
tokenization.convert_to_unicode(data[i][1])\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, text_c=text_c))\n\n with open(cache_dir, 'wb') as w:\n pickle.dump(examples, w)\n\n return examples","function_tokens":["def","_create_examples","(","self",",","data",",","set_type",")",":","cache_dir","=","os",".","path",".","join","(","self",".","data_dir",",","set_type","+","'_examples.pkl'",")","if","os",".","path",".","exists","(","cache_dir",")",":","examples","=","pickle",".","load","(","open","(","cache_dir",",","'rb'",")",")","else",":","examples","=","[","]","for","(","i",",","d",")","in","enumerate","(","data",")",":","answer","=","-","1","# \u8fd9\u91ccdata[i]\u67096\u4e2a\u5143\u7d20\uff0c0\u662fcontext\uff0c1\u662f\u95ee\u9898\uff0c2~5\u662fchoice\uff0c6\u662f\u7b54\u6848","for","k","in","range","(","4",")",":","if","data","[","i","]","[","2","+","k","]","==","data","[","i","]","[","6","]",":","answer","=","str","(","k",")","label","=","tokenization",".","convert_to_unicode","(","answer",")","for","k","in","range","(","4",")",":","guid","=","\"%s-%s-%s\"","%","(","set_type",",","i",",","k",")","text_a","=","tokenization",".","convert_to_unicode","(","data","[","i","]","[","0","]",")","text_b","=","tokenization",".","convert_to_unicode","(","data","[","i","]","[","k","+","2","]",")","text_c","=","tokenization",".","convert_to_unicode","(","data","[","i","]","[","1","]",")","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",",","text_c","=","text_c",")",")","with","open","(","cache_dir",",","'wb'",")","as","w",":","pickle",".","dump","(","examples",",","w",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/run_c3.py#L146-L172"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py","language":"python","identifier":"write_predictions_topk","parameters":"(config, all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions_topk(config, all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n print(\"Writing predictions to: %s\" % (output_prediction_file))\n print(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature['example_index']].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = 
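In c3Processor._create_examples above, the inline Chinese comment says (translated): here data[i] has six elements, where 0 is the context, 1 is the question, 2~5 are the choices, and 6 is the answer. (It actually enumerates seven fields, indices 0 through 6.) Each row therefore expands into four InputExamples, one per choice, all sharing the label of the choice that matches the answer, and the result is cached with pickle. A simplified sketch using plain dicts (the cache_path argument and the dict layout are illustrative, not the repository's API):

import os
import pickle

def create_examples(data, set_type, cache_path):
    # Row layout per the translated comment: d[0] context, d[1] question,
    # d[2:6] the four choices, d[6] the gold answer string.
    if os.path.exists(cache_path):
        with open(cache_path, "rb") as f:
            return pickle.load(f)
    examples = []
    for i, d in enumerate(data):
        # Label is the index (as a string) of the choice equal to the answer;
        # "-1" mirrors the original's unmatched default.
        label = next((str(k) for k in range(4) if d[2 + k] == d[6]), "-1")
        for k in range(4):
            examples.append({
                "guid": "%s-%s-%s" % (set_type, i, k),
                "text_a": d[0],      # context
                "text_b": d[2 + k],  # choice k
                "text_c": d[1],      # question
                "label": label,
            })
    with open(cache_path, "wb") as f:
        pickle.dump(examples, f)
    return examples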
collections.OrderedDict()\n\n for (example_index, example) in enumerate(tqdm(all_examples)):\n features = example_index_to_features[example_index]\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature['unique_id']]\n for i in range(config.start_n_top):\n for j in range(config.end_n_top):\n start_logit = result.start_top_logits[i]\n start_index = result.start_top_index[i]\n\n j_index = i * config.end_n_top + j\n\n end_logit = result.end_top_logits[j_index]\n end_index = result.end_top_index[j_index]\n\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature['tokens']):\n continue\n if end_index >= len(feature['tokens']):\n continue\n if not feature['token_is_max_context'].get(str(start_index), False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=start_logit,\n end_logit=end_logit))\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n # ipdb.set_trace()\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature['tokens'][pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature['token_to_orig_map'][str(pred.start_index)]\n orig_doc_end = feature['token_to_orig_map'][str(pred.end_index)]\n orig_tokens = example['ori_doc_tokens'][orig_doc_start:(orig_doc_end + 1)]\n tok_text = \"\".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \"\".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(_NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n # ipdb.set_trace()\n\n total_scores = []\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n\n probs = _compute_softmax(total_scores)\n\n # ipdb.set_trace()\n\n nbest_json = []\n # ipdb.set_trace()\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = float(probs[i])\n output[\"start_logit\"] = float(entry.start_logit)\n output[\"end_logit\"] = float(entry.end_logit)\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n # ipdb.set_trace()\n\n all_predictions[example['qid']] = nbest_json[0][\"text\"]\n all_nbest_json[example['qid']] = nbest_json\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + \"\\n\")\n\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + \"\\n\")","function_tokens":["def","write_predictions_topk","(","config",",","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",")",":","print","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","print","(","\"Writing nbest to: %s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature","[","'example_index'","]","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","tqdm","(","all_examples",")",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature","[","'unique_id'","]","]","for","i","in","range","(","config",".","start_n_top",")",":","for","j","in","range","(","config",".","end_n_top",")",":","start_logit","=","result",".","start_top_logits","[","i","]","start_index","=","result",".","start_top_index","[","i","]","j_index","=","i","*","config",".","end_n_top","+","j","end_logit","=","result",".","end_top_logits","[","j_index","]","end_index","=","result",".","end_top_index","[","j_index","]","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. 
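The prediction-writing code above reconstructs answer text by joining WordPiece tokens and stripping the "##" continuation markers before whitespace cleanup. A tiny sketch of that de-tokenization step (the helper name is made up for illustration):

def detokenize_wordpieces(tokens):
    # "##" marks a sub-token continuing the previous WordPiece; these
    # Chinese MRC baselines join tokens without spaces, so both the
    # space-prefixed and bare markers are removed.
    text = "".join(tokens).replace(" ##", "").replace("##", "")
    return " ".join(text.strip().split())

assert detokenize_wordpieces(["play", "##ing"]) == "playing"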
We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature","[","'tokens'","]",")",":","continue","if","end_index",">=","len","(","feature","[","'tokens'","]",")",":","continue","if","not","feature","[","'token_is_max_context'","]",".","get","(","str","(","start_index",")",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","start_logit",",","end_logit","=","end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"","]",")","seen_predictions","=","{","}","nbest","=","[","]","# ipdb.set_trace()","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature","[","'tokens'","]","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature","[","'token_to_orig_map'","]","[","str","(","pred",".","start_index",")","]","orig_doc_end","=","feature","[","'token_to_orig_map'","]","[","str","(","pred",".","end_index",")","]","orig_tokens","=","example","[","'ori_doc_tokens'","]","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\"\"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\"\"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",")",")","# In very rare edge cases we could have no valid predictions. 
So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","assert","len","(","nbest",")",">=","1","# ipdb.set_trace()","total_scores","=","[","]","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","probs","=","_compute_softmax","(","total_scores",")","# ipdb.set_trace()","nbest_json","=","[","]","# ipdb.set_trace()","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","float","(","probs","[","i","]",")","output","[","\"start_logit\"","]","=","float","(","entry",".","start_logit",")","output","[","\"end_logit\"","]","=","float","(","entry",".","end_logit",")","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","# ipdb.set_trace()","all_predictions","[","example","[","'qid'","]","]","=","nbest_json","[","0","]","[","\"text\"","]","all_nbest_json","[","example","[","'qid'","]","]","=","nbest_json","with","open","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",",","ensure_ascii","=","False",")","+","\"\\n\"",")","with","open","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",",","ensure_ascii","=","False",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py#L10-L154"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py","language":"python","identifier":"write_predictions","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, version_2_with_negative=False, null_score_diff_threshold=0.)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, version_2_with_negative=False, null_score_diff_threshold=0.):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n print(\"Writing predictions to: %s\" % (output_prediction_file))\n print(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature['example_index']].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = 
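write_predictions_topk above pairs each of the start_n_top start candidates with each of the end_n_top end candidates (note the flattened indexing j_index = i * end_n_top + j, because end candidates are stored per start candidate), discards spans that fall outside the passage, run backwards, or exceed max_answer_length, and ranks the survivors by summed logit. A condensed sketch of that enumeration (names are illustrative):

import collections

Span = collections.namedtuple("Span", ["start", "end", "score"])

def enumerate_topk_spans(start_logits, start_idx, end_logits, end_idx,
                         num_tokens, max_answer_length,
                         start_n_top, end_n_top):
    spans = []
    for i in range(start_n_top):
        for j in range(end_n_top):
            j_index = i * end_n_top + j  # ends are flattened per start
            s, e = start_idx[i], end_idx[j_index]
            if s >= num_tokens or e >= num_tokens:  # span outside the passage
                continue
            if e < s or e - s + 1 > max_answer_length:
                continue
            spans.append(Span(s, e, start_logits[i] + end_logits[j_index]))
    # Highest combined start+end logit first, as in the records above.
    return sorted(spans, key=lambda x: x.score, reverse=True)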
collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(tqdm(all_examples)):\n features = example_index_to_features[example_index]\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min null score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature['unique_id']]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature['tokens']):\n continue\n if end_index >= len(feature['tokens']):\n continue\n if str(start_index) not in feature['token_to_orig_map'] and \\\n start_index not in feature['token_to_orig_map']:\n continue\n if str(end_index) not in feature['token_to_orig_map'] and \\\n end_index not in feature['token_to_orig_map']:\n continue\n if not feature['token_is_max_context'].get(str(start_index), False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n if version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature['tokens'][pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature['token_to_orig_map'][str(pred.start_index)]\n orig_doc_end = feature['token_to_orig_map'][str(pred.end_index)]\n orig_tokens = example['ori_doc_tokens'][orig_doc_start:(orig_doc_end + 1)]\n tok_text = \"\".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \"\".join(orig_tokens)\n\n final_text = get_final_text(tok_text, 
orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n # if we didn't include the empty option in the n-best, include it\n if version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\",\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n\n # In very rare edge cases we could only have single null prediction.\n # So we just create a nonce prediction in this case to avoid failure.\n if len(nbest) == 1:\n nbest.insert(0, _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(_NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = float(probs[i])\n output[\"start_logit\"] = float(entry.start_logit)\n output[\"end_logit\"] = float(entry.end_logit)\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not version_2_with_negative:\n all_predictions[example['qid']] = nbest_json[0][\"text\"]\n all_nbest_json[example['qid']] = nbest_json\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)\n scores_diff_json[example['qid']] = score_diff\n if score_diff > null_score_diff_threshold:\n all_predictions[example['qid']] = \"\"\n else:\n all_predictions[example['qid']] = best_non_null_entry.text\n all_nbest_json[example['qid']] = nbest_json\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + \"\\n\")\n\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + \"\\n\")","function_tokens":["def","write_predictions","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",",","version_2_with_negative","=","False",",","null_score_diff_threshold","=","0.",")",":","print","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","print","(","\"Writing nbest to: %s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature","[","'example_index'","]","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: 
disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","tqdm","(","all_examples",")",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","score_null","=","1000000","# large and positive","min_null_feature_index","=","0","# the paragraph slice with min null score","null_start_logit","=","0","# the start logit at the slice with min null score","null_end_logit","=","0","# the end logit at the slice with min null score","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature","[","'unique_id'","]","]","start_indexes","=","_get_best_indexes","(","result",".","start_logits",",","n_best_size",")","end_indexes","=","_get_best_indexes","(","result",".","end_logits",",","n_best_size",")","# if we could have irrelevant answers, get the min score of irrelevant","if","version_2_with_negative",":","feature_null_score","=","result",".","start_logits","[","0","]","+","result",".","end_logits","[","0","]","if","feature_null_score","<","score_null",":","score_null","=","feature_null_score","min_null_feature_index","=","feature_index","null_start_logit","=","result",".","start_logits","[","0","]","null_end_logit","=","result",".","end_logits","[","0","]","for","start_index","in","start_indexes",":","for","end_index","in","end_indexes",":","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. 
We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature","[","'tokens'","]",")",":","continue","if","end_index",">=","len","(","feature","[","'tokens'","]",")",":","continue","if","str","(","start_index",")","not","in","feature","[","'token_to_orig_map'","]","and","start_index","not","in","feature","[","'token_to_orig_map'","]",":","continue","if","str","(","end_index",")","not","in","feature","[","'token_to_orig_map'","]","and","end_index","not","in","feature","[","'token_to_orig_map'","]",":","continue","if","not","feature","[","'token_is_max_context'","]",".","get","(","str","(","start_index",")",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","result",".","start_logits","[","start_index","]",",","end_logit","=","result",".","end_logits","[","end_index","]",")",")","if","version_2_with_negative",":","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","min_null_feature_index",",","start_index","=","0",",","end_index","=","0",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"","]",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature","[","'tokens'","]","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature","[","'token_to_orig_map'","]","[","str","(","pred",".","start_index",")","]","orig_doc_end","=","feature","[","'token_to_orig_map'","]","[","str","(","pred",".","end_index",")","]","orig_tokens","=","example","[","'ori_doc_tokens'","]","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\"\"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\"\"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",")",")","# if we didn't include the empty option in the n-best, include 
it","if","version_2_with_negative",":","if","\"\"","not","in","seen_predictions",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","# In very rare edge cases we could only have single null prediction.","# So we just create a nonce prediction in this case to avoid failure.","if","len","(","nbest",")","==","1",":","nbest",".","insert","(","0",",","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","# In very rare edge cases we could have no valid predictions. So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","assert","len","(","nbest",")",">=","1","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","if","not","best_non_null_entry",":","if","entry",".","text",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","float","(","probs","[","i","]",")","output","[","\"start_logit\"","]","=","float","(","entry",".","start_logit",")","output","[","\"end_logit\"","]","=","float","(","entry",".","end_logit",")","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","if","not","version_2_with_negative",":","all_predictions","[","example","[","'qid'","]","]","=","nbest_json","[","0","]","[","\"text\"","]","all_nbest_json","[","example","[","'qid'","]","]","=","nbest_json","else",":","# predict \"\" iff the null score - the score of best non-null > threshold","score_diff","=","score_null","-","best_non_null_entry",".","start_logit","-","(","best_non_null_entry",".","end_logit",")","scores_diff_json","[","example","[","'qid'","]","]","=","score_diff","if","score_diff",">","null_score_diff_threshold",":","all_predictions","[","example","[","'qid'","]","]","=","\"\"","else",":","all_predictions","[","example","[","'qid'","]","]","=","best_non_null_entry",".","text","all_nbest_json","[","example","[","'qid'","]","]","=","nbest_json","with","open","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",",","ensure_ascii","=","False",")","+","\"\\n\"",")","with","open","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",",","ensure_ascii","=","False",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py#L157-L340"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py","language":"python","identifier":"get_final_text","parameters":"(pred_text, orig_text, do_lower_case, verbose_logging=False)","argument_list":"","return_statement":"return output_text","docstring":"Project the tokenized prediction back to the original 
text.","docstring_summary":"Project the tokenized prediction back to the original text.","docstring_tokens":["Project","the","tokenized","prediction","back","to","the","original","text","."],"function":"def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping\/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heuristic between\n # `pred_text` and `orig_text` to get a character-to-character alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \"\".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n print(\"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n print(\"Length not equal after stripping spaces: '%s' vs '%s'\" % (orig_ns_text, tok_ns_text))\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n print(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n print(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text","function_tokens":["def","get_final_text","(","pred_text",",","orig_text",",","do_lower_case",",","verbose_logging","=","False",")",":","# When we created the data, we kept track of the alignment between original","# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So","# now `orig_text` contains the span of our original text corresponding to the","# span that we predicted.","#","# However, `orig_text` may contain extra characters that we don't want in","# our prediction.","#","# For example, let's say:","# pred_text = steve smith","# orig_text = Steve Smith's","#","# We don't want to return `orig_text` because it contains the extra \"'s\".","#","# We don't want to return `pred_text` because it's already been normalized","# (the SQuAD eval script also does punctuation stripping\/lower casing but","# our tokenizer does additional normalization like stripping accent","# characters).","#","# What we really want to return is \"Steve Smith\".","#","# Therefore, we have to apply a semi-complicated alignment heuristic between","# `pred_text` and `orig_text` to get a character-to-character alignment. This","# can fail in certain cases in which case we just return `orig_text`.","def","_strip_spaces","(","text",")",":","ns_chars","=","[","]","ns_to_s_map","=","collections",".","OrderedDict","(",")","for","(","i",",","c",")","in","enumerate","(","text",")",":","if","c","==","\" \"",":","continue","ns_to_s_map","[","len","(","ns_chars",")","]","=","i","ns_chars",".","append","(","c",")","ns_text","=","\"\"",".","join","(","ns_chars",")","return","(","ns_text",",","ns_to_s_map",")","# We first tokenize `orig_text`, strip whitespace from the result","# and `pred_text`, and check if they are the same length. If they are","# NOT the same length, the heuristic has failed. 
If they are the same","# length, we assume the characters are one-to-one aligned.","tokenizer","=","BasicTokenizer","(","do_lower_case","=","do_lower_case",")","tok_text","=","\"\"",".","join","(","tokenizer",".","tokenize","(","orig_text",")",")","start_position","=","tok_text",".","find","(","pred_text",")","if","start_position","==","-","1",":","if","verbose_logging",":","print","(","\"Unable to find text: '%s' in '%s'\"","%","(","pred_text",",","orig_text",")",")","return","orig_text","end_position","=","start_position","+","len","(","pred_text",")","-","1","(","orig_ns_text",",","orig_ns_to_s_map",")","=","_strip_spaces","(","orig_text",")","(","tok_ns_text",",","tok_ns_to_s_map",")","=","_strip_spaces","(","tok_text",")","if","len","(","orig_ns_text",")","!=","len","(","tok_ns_text",")",":","if","verbose_logging",":","print","(","\"Length not equal after stripping spaces: '%s' vs '%s'\"","%","(","orig_ns_text",",","tok_ns_text",")",")","return","orig_text","# We then project the characters in `pred_text` back to `orig_text` using","# the character-to-character alignment.","tok_s_to_ns_map","=","{","}","for","(","i",",","tok_index",")","in","tok_ns_to_s_map",".","items","(",")",":","tok_s_to_ns_map","[","tok_index","]","=","i","orig_start_position","=","None","if","start_position","in","tok_s_to_ns_map",":","ns_start_position","=","tok_s_to_ns_map","[","start_position","]","if","ns_start_position","in","orig_ns_to_s_map",":","orig_start_position","=","orig_ns_to_s_map","[","ns_start_position","]","if","orig_start_position","is","None",":","if","verbose_logging",":","print","(","\"Couldn't map start position\"",")","return","orig_text","orig_end_position","=","None","if","end_position","in","tok_s_to_ns_map",":","ns_end_position","=","tok_s_to_ns_map","[","end_position","]","if","ns_end_position","in","orig_ns_to_s_map",":","orig_end_position","=","orig_ns_to_s_map","[","ns_end_position","]","if","orig_end_position","is","None",":","if","verbose_logging",":","print","(","\"Couldn't map end position\"",")","return","orig_text","output_text","=","orig_text","[","orig_start_position",":","(","orig_end_position","+","1",")","]","return","output_text"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py#L343-L434"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return 
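get_final_text above aligns the normalized prediction with the original text character by character: it strips spaces from both strings, keeps a map from de-spaced positions back to original positions, and only trusts the projection when the two de-spaced strings have equal length. The position-mapping helper, sketched standalone with a usage check:

import collections

def strip_spaces(text):
    # Returns the de-spaced text plus a map from each de-spaced index
    # back to its index in the original string.
    ns_chars, ns_to_s = [], collections.OrderedDict()
    for i, c in enumerate(text):
        if c == " ":
            continue
        ns_to_s[len(ns_chars)] = i
        ns_chars.append(c)
    return "".join(ns_chars), ns_to_s

clean, back = strip_spaces("Steve Smith's")
assert clean == "SteveSmith's"
assert back[5] == 6  # the 'S' of "Smith" maps past the stripped space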
best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py#L437-L446"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_output.py#L449-L469"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py","language":"python","identifier":"write_predictions_topk","parameters":"(FLAGS, all_examples, all_features, all_results, n_best_size,\n max_answer_length, output_prediction_file, output_nbest_file)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions_topk(FLAGS, all_examples, all_features, all_results, n_best_size,\n max_answer_length, output_prediction_file, output_nbest_file):\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\",\n \"start_log_prob\", \"end_log_prob\"])\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", 
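The two helpers above are small enough to verify by hand: _get_best_indexes sorts positions by logit and keeps the top n, and _compute_softmax subtracts the maximum score before exponentiating so large logits cannot overflow. Equivalent compact sketches:

import math

def get_best_indexes(logits, n_best_size):
    # Indices of the n largest logits, best first.
    order = sorted(range(len(logits)), key=lambda i: logits[i], reverse=True)
    return order[:n_best_size]

def compute_softmax(scores):
    if not scores:
        return []
    m = max(scores)  # max-shift for numerical stability
    exps = [math.exp(s - m) for s in scores]
    total = sum(exps)
    return [e / total for e in exps]

assert get_best_indexes([0.1, 2.3, -1.0, 1.7], 2) == [1, 3]
assert abs(sum(compute_softmax([5.0, 1.0, -2.0])) - 1.0) < 1e-9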
\"start_log_prob\", \"end_log_prob\"])\n\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n print(\"Writing predictions to: %s\" % (output_prediction_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature['example_index']].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature['unique_id']]\n\n cur_null_score = result.cls_logits\n\n # if we could have irrelevant answers, get the min score of irrelevant\n score_null = min(score_null, cur_null_score)\n\n for i in range(FLAGS.start_n_top):\n for j in range(FLAGS.end_n_top):\n start_log_prob = result.start_top_log_probs[i]\n start_index = result.start_top_index[i]\n\n j_index = i * FLAGS.end_n_top + j\n\n end_log_prob = result.end_top_log_probs[j_index]\n end_index = result.end_top_index[j_index]\n\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= feature['paragraph_len'] - 1:\n continue\n if end_index >= feature['paragraph_len'] - 1:\n continue\n\n if not feature['token_is_max_context'].get(start_index, False) and \\\n not feature['token_is_max_context'].get(str(start_index), False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_log_prob=start_log_prob,\n end_log_prob=end_log_prob))\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_log_prob + x.end_log_prob),\n reverse=True)\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n\n tok_start_to_orig_index = feature['tok_start_to_orig_index']\n tok_end_to_orig_index = feature['tok_end_to_orig_index']\n start_orig_pos = tok_start_to_orig_index[pred.start_index]\n end_orig_pos = tok_end_to_orig_index[pred.end_index]\n\n paragraph_text = example['paragraph_text']\n final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()\n\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_log_prob=pred.start_log_prob,\n end_log_prob=pred.end_log_prob))\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"\", start_log_prob=-1e6,\n end_log_prob=-1e6))\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_log_prob + entry.end_log_prob)\n if not best_non_null_entry:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_log_prob\"] = entry.start_log_prob\n output[\"end_log_prob\"] = entry.end_log_prob\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n assert best_non_null_entry is not None\n\n score_diff = score_null\n scores_diff_json[example['qas_id']] = score_diff\n # note(zhiliny): always predict best_non_null_entry\n # and the evaluation script will search for the best threshold\n all_predictions[example['qas_id']] = best_non_null_entry.text\n\n all_nbest_json[example['qas_id']] = nbest_json\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + \"\\n\")\n\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + \"\\n\")","function_tokens":["def","write_predictions_topk","(","FLAGS",",","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","output_prediction_file",",","output_nbest_file",")",":","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_log_prob\"",",","\"end_log_prob\"","]",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_log_prob\"",",","\"end_log_prob\"","]",")","print","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature","[","'example_index'","]","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","all_examples",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","score_null","=","1000000","# large and positive","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature","[","'unique_id'","]","]","cur_null_score","=","result",".","cls_logits","# if we could have irrelevant answers, get the min score of 
irrelevant","score_null","=","min","(","score_null",",","cur_null_score",")","for","i","in","range","(","FLAGS",".","start_n_top",")",":","for","j","in","range","(","FLAGS",".","end_n_top",")",":","start_log_prob","=","result",".","start_top_log_probs","[","i","]","start_index","=","result",".","start_top_index","[","i","]","j_index","=","i","*","FLAGS",".","end_n_top","+","j","end_log_prob","=","result",".","end_top_log_probs","[","j_index","]","end_index","=","result",".","end_top_index","[","j_index","]","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. We throw out all","# invalid predictions.","if","start_index",">=","feature","[","'paragraph_len'","]","-","1",":","continue","if","end_index",">=","feature","[","'paragraph_len'","]","-","1",":","continue","if","not","feature","[","'token_is_max_context'","]",".","get","(","start_index",",","False",")","and","not","feature","[","'token_is_max_context'","]",".","get","(","str","(","start_index",")",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_log_prob","=","start_log_prob",",","end_log_prob","=","end_log_prob",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_log_prob","+","x",".","end_log_prob",")",",","reverse","=","True",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","tok_start_to_orig_index","=","feature","[","'tok_start_to_orig_index'","]","tok_end_to_orig_index","=","feature","[","'tok_end_to_orig_index'","]","start_orig_pos","=","tok_start_to_orig_index","[","pred",".","start_index","]","end_orig_pos","=","tok_end_to_orig_index","[","pred",".","end_index","]","paragraph_text","=","example","[","'paragraph_text'","]","final_text","=","paragraph_text","[","start_orig_pos",":","end_orig_pos","+","1","]",".","strip","(",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_log_prob","=","pred",".","start_log_prob",",","end_log_prob","=","pred",".","end_log_prob",")",")","# In very rare edge cases we could have no valid predictions. 
So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_log_prob","=","-","1e6",",","end_log_prob","=","-","1e6",")",")","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_log_prob","+","entry",".","end_log_prob",")","if","not","best_non_null_entry",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","probs","[","i","]","output","[","\"start_log_prob\"","]","=","entry",".","start_log_prob","output","[","\"end_log_prob\"","]","=","entry",".","end_log_prob","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","assert","best_non_null_entry","is","not","None","score_diff","=","score_null","scores_diff_json","[","example","[","'qas_id'","]","]","=","score_diff","# note(zhiliny): always predict best_non_null_entry","# and the evaluation script will search for the best threshold","all_predictions","[","example","[","'qas_id'","]","]","=","best_non_null_entry",".","text","all_nbest_json","[","example","[","'qas_id'","]","]","=","nbest_json","with","open","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",",","ensure_ascii","=","False",")","+","\"\\n\"",")","with","open","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",",","ensure_ascii","=","False",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py#L10-L156"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py","language":"python","identifier":"write_predictions","parameters":"(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, version_2_with_negative=False, null_score_diff_threshold=0.)","argument_list":"","return_statement":"","docstring":"Write final predictions to the json file and log-odds of null if needed.","docstring_summary":"Write final predictions to the json file and log-odds of null if needed.","docstring_tokens":["Write","final","predictions","to","the","json","file","and","log","-","odds","of","null","if","needed","."],"function":"def write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, version_2_with_negative=False, null_score_diff_threshold=0.):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n print(\"Writing predictions to: %s\" % (output_prediction_file))\n print(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature['example_index']].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = 
result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(tqdm(all_examples)):\n features = example_index_to_features[example_index]\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min null score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature['unique_id']]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature['tokens']):\n continue\n if end_index >= len(feature['tokens']):\n continue\n if str(start_index) not in feature['token_to_orig_map'] and \\\n start_index not in feature['token_to_orig_map']:\n continue\n if str(end_index) not in feature['token_to_orig_map'] and \\\n end_index not in feature['token_to_orig_map']:\n continue\n if not feature['token_is_max_context'].get(str(start_index), False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n if version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature['tokens'][pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature['token_to_orig_map'][str(pred.start_index)]\n orig_doc_end = feature['token_to_orig_map'][str(pred.end_index)]\n orig_tokens = example['doc_tokens'][orig_doc_start:(orig_doc_end + 1)]\n tok_text = \"\".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n 
tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \"\".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n # if we didn't include the empty option in the n-best, include it\n if version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\",\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n\n # In very rare edge cases we could only have single null prediction.\n # So we just create a nonce prediction in this case to avoid failure.\n if len(nbest) == 1:\n nbest.insert(0, _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(_NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = float(probs[i])\n output[\"start_logit\"] = float(entry.start_logit)\n output[\"end_logit\"] = float(entry.end_logit)\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not version_2_with_negative:\n all_predictions[example['qid']] = nbest_json[0][\"text\"]\n all_nbest_json[example['qid']] = nbest_json\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)\n scores_diff_json[example['qid']] = score_diff\n if score_diff > null_score_diff_threshold:\n all_predictions[example['qid']] = \"\"\n else:\n all_predictions[example['qid']] = best_non_null_entry.text\n all_nbest_json[example['qid']] = nbest_json\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + \"\\n\")\n\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + \"\\n\")","function_tokens":["def","write_predictions","(","all_examples",",","all_features",",","all_results",",","n_best_size",",","max_answer_length",",","do_lower_case",",","output_prediction_file",",","output_nbest_file",",","version_2_with_negative","=","False",",","null_score_diff_threshold","=","0.",")",":","print","(","\"Writing predictions to: %s\"","%","(","output_prediction_file",")",")","print","(","\"Writing nbest to: 
%s\"","%","(","output_nbest_file",")",")","example_index_to_features","=","collections",".","defaultdict","(","list",")","for","feature","in","all_features",":","example_index_to_features","[","feature","[","'example_index'","]","]",".","append","(","feature",")","unique_id_to_result","=","{","}","for","result","in","all_results",":","unique_id_to_result","[","result",".","unique_id","]","=","result","_PrelimPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"PrelimPrediction\"",",","[","\"feature_index\"",",","\"start_index\"",",","\"end_index\"",",","\"start_logit\"",",","\"end_logit\"","]",")","all_predictions","=","collections",".","OrderedDict","(",")","all_nbest_json","=","collections",".","OrderedDict","(",")","scores_diff_json","=","collections",".","OrderedDict","(",")","for","(","example_index",",","example",")","in","enumerate","(","tqdm","(","all_examples",")",")",":","features","=","example_index_to_features","[","example_index","]","prelim_predictions","=","[","]","# keep track of the minimum score of null start+end of position 0","score_null","=","1000000","# large and positive","min_null_feature_index","=","0","# the paragraph slice with min null score","null_start_logit","=","0","# the start logit at the slice with min null score","null_end_logit","=","0","# the end logit at the slice with min null score","for","(","feature_index",",","feature",")","in","enumerate","(","features",")",":","result","=","unique_id_to_result","[","feature","[","'unique_id'","]","]","start_indexes","=","_get_best_indexes","(","result",".","start_logits",",","n_best_size",")","end_indexes","=","_get_best_indexes","(","result",".","end_logits",",","n_best_size",")","# if we could have irrelevant answers, get the min score of irrelevant","if","version_2_with_negative",":","feature_null_score","=","result",".","start_logits","[","0","]","+","result",".","end_logits","[","0","]","if","feature_null_score","<","score_null",":","score_null","=","feature_null_score","min_null_feature_index","=","feature_index","null_start_logit","=","result",".","start_logits","[","0","]","null_end_logit","=","result",".","end_logits","[","0","]","for","start_index","in","start_indexes",":","for","end_index","in","end_indexes",":","# We could hypothetically create invalid predictions, e.g., predict","# that the start of the span is in the question. 
We throw out all","# invalid predictions.","if","start_index",">=","len","(","feature","[","'tokens'","]",")",":","continue","if","end_index",">=","len","(","feature","[","'tokens'","]",")",":","continue","if","str","(","start_index",")","not","in","feature","[","'token_to_orig_map'","]","and","start_index","not","in","feature","[","'token_to_orig_map'","]",":","continue","if","str","(","end_index",")","not","in","feature","[","'token_to_orig_map'","]","and","end_index","not","in","feature","[","'token_to_orig_map'","]",":","continue","if","not","feature","[","'token_is_max_context'","]",".","get","(","str","(","start_index",")",",","False",")",":","continue","if","end_index","<","start_index",":","continue","length","=","end_index","-","start_index","+","1","if","length",">","max_answer_length",":","continue","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","feature_index",",","start_index","=","start_index",",","end_index","=","end_index",",","start_logit","=","result",".","start_logits","[","start_index","]",",","end_logit","=","result",".","end_logits","[","end_index","]",")",")","if","version_2_with_negative",":","prelim_predictions",".","append","(","_PrelimPrediction","(","feature_index","=","min_null_feature_index",",","start_index","=","0",",","end_index","=","0",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","prelim_predictions","=","sorted","(","prelim_predictions",",","key","=","lambda","x",":","(","x",".","start_logit","+","x",".","end_logit",")",",","reverse","=","True",")","_NbestPrediction","=","collections",".","namedtuple","(","# pylint: disable=invalid-name","\"NbestPrediction\"",",","[","\"text\"",",","\"start_logit\"",",","\"end_logit\"","]",")","seen_predictions","=","{","}","nbest","=","[","]","for","pred","in","prelim_predictions",":","if","len","(","nbest",")",">=","n_best_size",":","break","feature","=","features","[","pred",".","feature_index","]","if","pred",".","start_index",">","0",":","# this is a non-null prediction","tok_tokens","=","feature","[","'tokens'","]","[","pred",".","start_index",":","(","pred",".","end_index","+","1",")","]","orig_doc_start","=","feature","[","'token_to_orig_map'","]","[","str","(","pred",".","start_index",")","]","orig_doc_end","=","feature","[","'token_to_orig_map'","]","[","str","(","pred",".","end_index",")","]","orig_tokens","=","example","[","'doc_tokens'","]","[","orig_doc_start",":","(","orig_doc_end","+","1",")","]","tok_text","=","\"\"",".","join","(","tok_tokens",")","# De-tokenize WordPieces that have been split off.","tok_text","=","tok_text",".","replace","(","\" ##\"",",","\"\"",")","tok_text","=","tok_text",".","replace","(","\"##\"",",","\"\"",")","# Clean whitespace","tok_text","=","tok_text",".","strip","(",")","tok_text","=","\" \"",".","join","(","tok_text",".","split","(",")",")","orig_text","=","\"\"",".","join","(","orig_tokens",")","final_text","=","get_final_text","(","tok_text",",","orig_text",",","do_lower_case",")","if","final_text","in","seen_predictions",":","continue","seen_predictions","[","final_text","]","=","True","else",":","final_text","=","\"\"","seen_predictions","[","final_text","]","=","True","nbest",".","append","(","_NbestPrediction","(","text","=","final_text",",","start_logit","=","pred",".","start_logit",",","end_logit","=","pred",".","end_logit",")",")","# if we didn't include the empty option in the n-best, include 
it","if","version_2_with_negative",":","if","\"\"","not","in","seen_predictions",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"\"",",","start_logit","=","null_start_logit",",","end_logit","=","null_end_logit",")",")","# In very rare edge cases we could only have single null prediction.","# So we just create a nonce prediction in this case to avoid failure.","if","len","(","nbest",")","==","1",":","nbest",".","insert","(","0",",","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","# In very rare edge cases we could have no valid predictions. So we","# just create a nonce prediction in this case to avoid failure.","if","not","nbest",":","nbest",".","append","(","_NbestPrediction","(","text","=","\"empty\"",",","start_logit","=","0.0",",","end_logit","=","0.0",")",")","assert","len","(","nbest",")",">=","1","total_scores","=","[","]","best_non_null_entry","=","None","for","entry","in","nbest",":","total_scores",".","append","(","entry",".","start_logit","+","entry",".","end_logit",")","if","not","best_non_null_entry",":","if","entry",".","text",":","best_non_null_entry","=","entry","probs","=","_compute_softmax","(","total_scores",")","nbest_json","=","[","]","for","(","i",",","entry",")","in","enumerate","(","nbest",")",":","output","=","collections",".","OrderedDict","(",")","output","[","\"text\"","]","=","entry",".","text","output","[","\"probability\"","]","=","float","(","probs","[","i","]",")","output","[","\"start_logit\"","]","=","float","(","entry",".","start_logit",")","output","[","\"end_logit\"","]","=","float","(","entry",".","end_logit",")","nbest_json",".","append","(","output",")","assert","len","(","nbest_json",")",">=","1","if","not","version_2_with_negative",":","all_predictions","[","example","[","'qid'","]","]","=","nbest_json","[","0","]","[","\"text\"","]","all_nbest_json","[","example","[","'qid'","]","]","=","nbest_json","else",":","# predict \"\" iff the null score - the score of best non-null > threshold","score_diff","=","score_null","-","best_non_null_entry",".","start_logit","-","(","best_non_null_entry",".","end_logit",")","scores_diff_json","[","example","[","'qid'","]","]","=","score_diff","if","score_diff",">","null_score_diff_threshold",":","all_predictions","[","example","[","'qid'","]","]","=","\"\"","else",":","all_predictions","[","example","[","'qid'","]","]","=","best_non_null_entry",".","text","all_nbest_json","[","example","[","'qid'","]","]","=","nbest_json","with","open","(","output_prediction_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_predictions",",","indent","=","4",",","ensure_ascii","=","False",")","+","\"\\n\"",")","with","open","(","output_nbest_file",",","\"w\"",")","as","writer",":","writer",".","write","(","json",".","dumps","(","all_nbest_json",",","indent","=","4",",","ensure_ascii","=","False",")","+","\"\\n\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py#L159-L342"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py","language":"python","identifier":"get_final_text","parameters":"(pred_text, orig_text, do_lower_case, verbose_logging=False)","argument_list":"","return_statement":"return output_text","docstring":"Project the tokenized prediction back to the original 
text.","docstring_summary":"Project the tokenized prediction back to the original text.","docstring_tokens":["Project","the","tokenized","prediction","back","to","the","original","text","."],"function":"def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping\/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heuristic between\n # `pred_text` and `orig_text` to get a character-to-character alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \"\".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n print(\"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n print(\"Length not equal after stripping spaces: '%s' vs '%s'\" % (orig_ns_text, tok_ns_text))\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n print(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n print(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text","function_tokens":["def","get_final_text","(","pred_text",",","orig_text",",","do_lower_case",",","verbose_logging","=","False",")",":","# When we created the data, we kept track of the alignment between original","# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So","# now `orig_text` contains the span of our original text corresponding to the","# span that we predicted.","#","# However, `orig_text` may contain extra characters that we don't want in","# our prediction.","#","# For example, let's say:","# pred_text = steve smith","# orig_text = Steve Smith's","#","# We don't want to return `orig_text` because it contains the extra \"'s\".","#","# We don't want to return `pred_text` because it's already been normalized","# (the SQuAD eval script also does punctuation stripping\/lower casing but","# our tokenizer does additional normalization like stripping accent","# characters).","#","# What we really want to return is \"Steve Smith\".","#","# Therefore, we have to apply a semi-complicated alignment heuristic between","# `pred_text` and `orig_text` to get a character-to-character alignment. This","# can fail in certain cases in which case we just return `orig_text`.","def","_strip_spaces","(","text",")",":","ns_chars","=","[","]","ns_to_s_map","=","collections",".","OrderedDict","(",")","for","(","i",",","c",")","in","enumerate","(","text",")",":","if","c","==","\" \"",":","continue","ns_to_s_map","[","len","(","ns_chars",")","]","=","i","ns_chars",".","append","(","c",")","ns_text","=","\"\"",".","join","(","ns_chars",")","return","(","ns_text",",","ns_to_s_map",")","# We first tokenize `orig_text`, strip whitespace from the result","# and `pred_text`, and check if they are the same length. If they are","# NOT the same length, the heuristic has failed. 
If they are the same","# length, we assume the characters are one-to-one aligned.","tokenizer","=","BasicTokenizer","(","do_lower_case","=","do_lower_case",")","tok_text","=","\"\"",".","join","(","tokenizer",".","tokenize","(","orig_text",")",")","start_position","=","tok_text",".","find","(","pred_text",")","if","start_position","==","-","1",":","if","verbose_logging",":","print","(","\"Unable to find text: '%s' in '%s'\"","%","(","pred_text",",","orig_text",")",")","return","orig_text","end_position","=","start_position","+","len","(","pred_text",")","-","1","(","orig_ns_text",",","orig_ns_to_s_map",")","=","_strip_spaces","(","orig_text",")","(","tok_ns_text",",","tok_ns_to_s_map",")","=","_strip_spaces","(","tok_text",")","if","len","(","orig_ns_text",")","!=","len","(","tok_ns_text",")",":","if","verbose_logging",":","print","(","\"Length not equal after stripping spaces: '%s' vs '%s'\"","%","(","orig_ns_text",",","tok_ns_text",")",")","return","orig_text","# We then project the characters in `pred_text` back to `orig_text` using","# the character-to-character alignment.","tok_s_to_ns_map","=","{","}","for","(","i",",","tok_index",")","in","tok_ns_to_s_map",".","items","(",")",":","tok_s_to_ns_map","[","tok_index","]","=","i","orig_start_position","=","None","if","start_position","in","tok_s_to_ns_map",":","ns_start_position","=","tok_s_to_ns_map","[","start_position","]","if","ns_start_position","in","orig_ns_to_s_map",":","orig_start_position","=","orig_ns_to_s_map","[","ns_start_position","]","if","orig_start_position","is","None",":","if","verbose_logging",":","print","(","\"Couldn't map start position\"",")","return","orig_text","orig_end_position","=","None","if","end_position","in","tok_s_to_ns_map",":","ns_end_position","=","tok_s_to_ns_map","[","end_position","]","if","ns_end_position","in","orig_ns_to_s_map",":","orig_end_position","=","orig_ns_to_s_map","[","ns_end_position","]","if","orig_end_position","is","None",":","if","verbose_logging",":","print","(","\"Couldn't map end position\"",")","return","orig_text","output_text","=","orig_text","[","orig_start_position",":","(","orig_end_position","+","1",")","]","return","output_text"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py#L345-L436"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py","language":"python","identifier":"_get_best_indexes","parameters":"(logits, n_best_size)","argument_list":"","return_statement":"return best_indexes","docstring":"Get the n-best logits from a list.","docstring_summary":"Get the n-best logits from a list.","docstring_tokens":["Get","the","n","-","best","logits","from","a","list","."],"function":"def _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return 
best_indexes","function_tokens":["def","_get_best_indexes","(","logits",",","n_best_size",")",":","index_and_score","=","sorted","(","enumerate","(","logits",")",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","best_indexes","=","[","]","for","i","in","range","(","len","(","index_and_score",")",")",":","if","i",">=","n_best_size",":","break","best_indexes",".","append","(","index_and_score","[","i","]","[","0","]",")","return","best_indexes"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py#L439-L448"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py","language":"python","identifier":"_compute_softmax","parameters":"(scores)","argument_list":"","return_statement":"return probs","docstring":"Compute softmax probability over raw logits.","docstring_summary":"Compute softmax probability over raw logits.","docstring_tokens":["Compute","softmax","probability","over","raw","logits","."],"function":"def _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score \/ total_sum)\n return probs","function_tokens":["def","_compute_softmax","(","scores",")",":","if","not","scores",":","return","[","]","max_score","=","None","for","score","in","scores",":","if","max_score","is","None","or","score",">","max_score",":","max_score","=","score","exp_scores","=","[","]","total_sum","=","0.0","for","score","in","scores",":","x","=","math",".","exp","(","score","-","max_score",")","exp_scores",".","append","(","x",")","total_sum","+=","x","probs","=","[","]","for","score","in","exp_scores",":","probs",".","append","(","score","\/","total_sum",")","return","probs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_output.py#L451-L471"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_preprocess.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a peice of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a peice of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","peice","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a peice of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_preprocess.py#L12-L18"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_preprocess.py","language":"python","identifier":"_improve_answer_span","parameters":"(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text)","argument_list":"","return_statement":"return (input_start, input_end)","docstring":"Returns tokenized answer spans that better match the annotated answer.","docstring_summary":"Returns tokenized answer spans that better match the annotated answer.","docstring_tokens":["Returns","tokenized","answer","spans","that","better","match","the","annotated","answer","."],"function":"def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)","function_tokens":["def","_improve_answer_span","(","doc_tokens",",","input_start",",","input_end",",","tokenizer",",","orig_answer_text",")",":","# The SQuAD annotations are character based. We first project them to","# whitespace-tokenized words. But then after WordPiece tokenization, we can","# often find a \"better match\". For example:","#","# Question: What year was John Smith born?","# Context: The leader was John Smith (1895-1943).","# Answer: 1895","#","# The original whitespace-tokenized answer will be \"(1895-1943).\". However","# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match","# the exact answer, 1895.","#","# However, this is not always possible. Consider the following:","#","# Question: What country is the top exporter of electornics?","# Context: The Japanese electronics industry is the lagest in the world.","# Answer: Japan","#","# In this case, the annotator chose \"Japan\" as a character sub-span of","# the word \"Japanese\". Since our WordPiece tokenizer does not split","# \"Japanese\", we just use \"Japanese\" as the annotation. 
This is fairly rare","# in SQuAD, but does happen.","tok_answer_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_answer_text",")",")","for","new_start","in","range","(","input_start",",","input_end","+","1",")",":","for","new_end","in","range","(","input_end",",","new_start","-","1",",","-","1",")",":","text_span","=","\" \"",".","join","(","doc_tokens","[","new_start",":","(","new_end","+","1",")","]",")","if","text_span","==","tok_answer_text",":","return","(","new_start",",","new_end",")","return","(","input_start",",","input_end",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_preprocess.py#L21-L55"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_preprocess.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. 
We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_preprocess.py#L58-L92"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_preprocess.py","language":"python","identifier":"Traditional2Simplified","parameters":"(sentence)","argument_list":"","return_statement":"return sentence","docstring":"\u5c06sentence\u4e2d\u7684\u7e41\u4f53\u5b57\u8f6c\u4e3a\u7b80\u4f53\u5b57\n :param sentence: \u5f85\u8f6c\u6362\u7684\u53e5\u5b50\n :return: \u5c06\u53e5\u5b50\u4e2d\u7e41\u4f53\u5b57\u8f6c\u6362\u4e3a\u7b80\u4f53\u5b57\u4e4b\u540e\u7684\u53e5\u5b50","docstring_summary":"\u5c06sentence\u4e2d\u7684\u7e41\u4f53\u5b57\u8f6c\u4e3a\u7b80\u4f53\u5b57\n :param sentence: \u5f85\u8f6c\u6362\u7684\u53e5\u5b50\n :return: \u5c06\u53e5\u5b50\u4e2d\u7e41\u4f53\u5b57\u8f6c\u6362\u4e3a\u7b80\u4f53\u5b57\u4e4b\u540e\u7684\u53e5\u5b50","docstring_tokens":["\u5c06sentence\u4e2d\u7684\u7e41\u4f53\u5b57\u8f6c\u4e3a\u7b80\u4f53\u5b57",":","param","sentence",":","\u5f85\u8f6c\u6362\u7684\u53e5\u5b50",":","return",":","\u5c06\u53e5\u5b50\u4e2d\u7e41\u4f53\u5b57\u8f6c\u6362\u4e3a\u7b80\u4f53\u5b57\u4e4b\u540e\u7684\u53e5\u5b50"],"function":"def Traditional2Simplified(sentence):\n '''\n \u5c06sentence\u4e2d\u7684\u7e41\u4f53\u5b57\u8f6c\u4e3a\u7b80\u4f53\u5b57\n :param sentence: \u5f85\u8f6c\u6362\u7684\u53e5\u5b50\n :return: \u5c06\u53e5\u5b50\u4e2d\u7e41\u4f53\u5b57\u8f6c\u6362\u4e3a\u7b80\u4f53\u5b57\u4e4b\u540e\u7684\u53e5\u5b50\n '''\n sentence = Converter('zh-hans').convert(sentence)\n return sentence","function_tokens":["def","Traditional2Simplified","(","sentence",")",":","sentence","=","Converter","(","'zh-hans'",")",".","convert","(","sentence",")","return","sentence"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/DRCD_preprocess.py#L95-L102"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_preprocess.py","language":"python","identifier":"_improve_answer_span","parameters":"(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text)","argument_list":"","return_statement":"return (input_start, input_end)","docstring":"Returns tokenized answer spans that 
better match the annotated answer.","docstring_summary":"Returns tokenized answer spans that better match the annotated answer.","docstring_tokens":["Returns","tokenized","answer","spans","that","better","match","the","annotated","answer","."],"function":"def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electronics?\n # Context: The Japanese electronics industry is the largest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)","function_tokens":["def","_improve_answer_span","(","doc_tokens",",","input_start",",","input_end",",","tokenizer",",","orig_answer_text",")",":","# The SQuAD annotations are character based. We first project them to","# whitespace-tokenized words. But then after WordPiece tokenization, we can","# often find a \"better match\". For example:","#","# Question: What year was John Smith born?","# Context: The leader was John Smith (1895-1943).","# Answer: 1895","#","# The original whitespace-tokenized answer will be \"(1895-1943).\". However","# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match","# the exact answer, 1895.","#","# However, this is not always possible. Consider the following:","#","# Question: What country is the top exporter of electronics?","# Context: The Japanese electronics industry is the largest in the world.","# Answer: Japan","#","# In this case, the annotator chose \"Japan\" as a character sub-span of","# the word \"Japanese\". Since our WordPiece tokenizer does not split","# \"Japanese\", we just use \"Japanese\" as the annotation. 
This is fairly rare","# in SQuAD, but does happen.","tok_answer_text","=","\" \"",".","join","(","tokenizer",".","tokenize","(","orig_answer_text",")",")","for","new_start","in","range","(","input_start",",","input_end","+","1",")",":","for","new_end","in","range","(","input_end",",","new_start","-","1",",","-","1",")",":","text_span","=","\" \"",".","join","(","doc_tokens","[","new_start",":","(","new_end","+","1",")","]",")","if","text_span","==","tok_answer_text",":","return","(","new_start",",","new_end",")","return","(","input_start",",","input_end",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_preprocess.py#L12-L46"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_preprocess.py","language":"python","identifier":"_check_is_max_context","parameters":"(doc_spans, cur_span_index, position)","argument_list":"","return_statement":"return cur_span_index == best_span_index","docstring":"Check if this is the 'max context' doc span for the token.","docstring_summary":"Check if this is the 'max context' doc span for the token.","docstring_tokens":["Check","if","this","is","the","max","context","doc","span","for","the","token","."],"function":"def _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index","function_tokens":["def","_check_is_max_context","(","doc_spans",",","cur_span_index",",","position",")",":","# Because of the sliding window approach taken to scoring documents, a single","# token can appear in multiple documents. E.g.","# Doc: the man went to the store and bought a gallon of milk","# Span A: the man went to the","# Span B: to the store and bought","# Span C: and bought a gallon of","# ...","#","# Now the word 'bought' will have two scores from spans B and C. 
We only","# want to consider the score with \"maximum context\", which we define as","# the *minimum* of its left and right context (the *sum* of left and","# right context will always be the same, of course).","#","# In the example the maximum context for 'bought' would be span C since","# it has 1 left context and 3 right context, while span B has 4 left context","# and 0 right context.","best_score","=","None","best_span_index","=","None","for","(","span_index",",","doc_span",")","in","enumerate","(","doc_spans",")",":","end","=","doc_span",".","start","+","doc_span",".","length","-","1","if","position","<","doc_span",".","start",":","continue","if","position",">","end",":","continue","num_left_context","=","position","-","doc_span",".","start","num_right_context","=","end","-","position","score","=","min","(","num_left_context",",","num_right_context",")","+","0.01","*","doc_span",".","length","if","best_score","is","None","or","score",">","best_score",":","best_score","=","score","best_span_index","=","span_index","return","cur_span_index","==","best_span_index"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/cmrc2018_preprocess.py#L49-L83"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/CHID_preprocess.py","language":"python","identifier":"read_chid_examples","parameters":"(input_data_file, input_label_file, is_training=True)","argument_list":"","return_statement":"return examples","docstring":"\u5c06\u539f\u59cb\u6570\u636e\u5904\u7406\u4e3a\u5982\u4e0b\u5f62\u5f0f\uff1a\n part_passage\u904d\u5386\u6bcf\u4e2ablak\u7684\u5468\u56f4\u4f4d\u7f6e\n :param input_data:\n :param is_training:\n :return:","docstring_summary":"\u5c06\u539f\u59cb\u6570\u636e\u5904\u7406\u4e3a\u5982\u4e0b\u5f62\u5f0f\uff1a\n part_passage\u904d\u5386\u6bcf\u4e2ablak\u7684\u5468\u56f4\u4f4d\u7f6e\n :param input_data:\n :param is_training:\n :return:","docstring_tokens":["\u5c06\u539f\u59cb\u6570\u636e\u5904\u7406\u4e3a\u5982\u4e0b\u5f62\u5f0f\uff1a","part_passage\u904d\u5386\u6bcf\u4e2ablak\u7684\u5468\u56f4\u4f4d\u7f6e",":","param","input_data",":",":","param","is_training",":",":","return",":"],"function":"def read_chid_examples(input_data_file, input_label_file, is_training=True):\n '''\n \u5c06\u539f\u59cb\u6570\u636e\u5904\u7406\u4e3a\u5982\u4e0b\u5f62\u5f0f\uff1a\n part_passage\u904d\u5386\u6bcf\u4e2ablak\u7684\u5468\u56f4\u4f4d\u7f6e\n :param input_data:\n :param is_training:\n :return:\n '''\n\n if is_training:\n input_label = json.load(open(input_label_file))\n input_data = open(input_data_file)\n\n def _is_chinese_char(cp):\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False\n\n def is_fuhao(c):\n if c == '\u3002' or c == '\uff0c' or c == '\uff01' or c == '\uff1f' or c == '\uff1b' or c == '\u3001' or c == '\uff1a' or c == '\uff08' or c == '\uff09' \\\n or c == '\uff0d' or c == '~' or c == '\u300c' or c == '\u300a' or c == '\u300b' or c == ',' or c == '\u300d' or c == '\"' or c == '\u201c' or c == '\u201d' \\\n or c == '$' or c == '\u300e' or c == '\u300f' or c == '\u2014' or c == ';' or c == '\u3002' or c == '(' or c 
== ')' or c == '-' or c == '\uff5e' or c == '\u3002' \\\n or c == '\u2018' or c == '\u2019':\n return True\n return False\n\n def _tokenize_chinese_chars(text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n is_blank = False\n for index, char in enumerate(text):\n cp = ord(char)\n if is_blank:\n output.append(char)\n if context[index - 12:index + 1].startswith(\"#idiom\"):\n is_blank = False\n output.append(SPIECE_UNDERLINE)\n else:\n if text[index:index + 6] == \"#idiom\":\n is_blank = True\n if len(output) > 0 and output[-1] != SPIECE_UNDERLINE:\n output.append(SPIECE_UNDERLINE)\n output.append(char)\n elif _is_chinese_char(cp) or is_fuhao(char):\n if len(output) > 0 and output[-1] != SPIECE_UNDERLINE:\n output.append(SPIECE_UNDERLINE)\n output.append(char)\n output.append(SPIECE_UNDERLINE)\n else:\n output.append(char)\n return \"\".join(output)\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F or c == SPIECE_UNDERLINE:\n return True\n return False\n\n examples = []\n example_id = 0\n for data in tqdm(input_data):\n\n data = eval(data)\n options = data['candidates']\n\n for context in data['content']:\n\n context = context.replace(\"\u201c\", \"\\\"\").replace(\"\u201d\", \"\\\"\").replace(\"\u2014\u2014\", \"--\"). \\\n replace(\"\u2014\", \"-\").replace(\"\u2015\", \"-\").replace(\"\u2026\", \"...\").replace(\"\u2018\", \"\\'\").replace(\"\u2019\", \"\\'\")\n context = _tokenize_chinese_chars(context)\n\n paragraph_text = context.strip()\n doc_tokens = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n\n tags = [blank for blank in doc_tokens if '#idiom' in blank]\n\n if is_training:\n for tag_index, tag in enumerate(tags):\n answer_index = input_label[tag]\n example = ChidExample(\n example_id=example_id,\n tag=tag,\n doc_tokens=doc_tokens,\n options=options,\n answer_index=answer_index)\n examples.append(example)\n else:\n for tag_index, tag in enumerate(tags):\n example = ChidExample(\n example_id=example_id,\n tag=tag,\n doc_tokens=doc_tokens,\n options=options)\n examples.append(example)\n else:\n example_id += 1\n else:\n print('\u539f\u59cb\u6837\u672c\u4e2a\u6570\uff1a{}'.format(example_id))\n\n print('\u5b9e\u9645\u751f\u6210\u603b\u6837\u4f8b\u6570\uff1a{}'.format(len(examples)))\n return examples","function_tokens":["def","read_chid_examples","(","input_data_file",",","input_label_file",",","is_training","=","True",")",":","if","is_training",":","input_label","=","json",".","load","(","open","(","input_label_file",")",")","input_data","=","open","(","input_data_file",")","def","_is_chinese_char","(","cp",")",":","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False","def","is_fuhao","(","c",")",":","if","c","==","'\u3002'","or","c","==","'\uff0c'","or","c","==","'\uff01'","or","c","==","'\uff1f'","or","c","==","'\uff1b'","or","c","==","'\u3001'","or","c","==","'\uff1a'","or","c","==","'\uff08'","or","c","==","'\uff09'","or","c","==","'\uff0d'","or","c","==","'~'","or","c","==","'\u300c'","or","c","==","'\u300a'","or","c","==","'\u300b'","or","c","==","','","or","c","==","'\u300d'","or","c","==","'\"'","or","c","==","'\u201c'","or","c","==","'\u201d'","or","c","==","'$'","or","c","==","'\u300e'","or","c","==","'\u300f'","or","c","==","'\u2014'","or","c","==","';'","or","c","==","'\u3002'","or","c","==","'('","or","c","==","')'","or","c","==","'-'","or","c","==","'\uff5e'","or","c","==","'\u3002'","or","c","==","'\u2018'","or","c","==","'\u2019'",":","return","True","return","False","def","_tokenize_chinese_chars","(","text",")",":","\"\"\"Adds whitespace around any CJK character.\"\"\"","output","=","[","]","is_blank","=","False","for","index",",","char","in","enumerate","(","text",")",":","cp","=","ord","(","char",")","if","is_blank",":","output",".","append","(","char",")","if","context","[","index","-","12",":","index","+","1","]",".","startswith","(","\"#idiom\"",")",":","is_blank","=","False","output",".","append","(","SPIECE_UNDERLINE",")","else",":","if","text","[","index",":","index","+","6","]","==","\"#idiom\"",":","is_blank","=","True","if","len","(","output",")",">","0","and","output","[","-","1","]","!=","SPIECE_UNDERLINE",":","output",".","append","(","SPIECE_UNDERLINE",")","output",".","append","(","char",")","elif","_is_chinese_char","(","cp",")","or","is_fuhao","(","char",")",":","if","len","(","output",")",">","0","and","output","[","-","1","]","!=","SPIECE_UNDERLINE",":","output",".","append","(","SPIECE_UNDERLINE",")","output",".","append","(","char",")","output",".","append","(","SPIECE_UNDERLINE",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")","def","is_whitespace","(","c",")",":","if","c","==","\" \"","or","c","==","\"\\t\"","or","c","==","\"\\r\"","or","c","==","\"\\n\"","or","ord","(","c",")","==","0x202F","or","c","==","SPIECE_UNDERLINE",":","return","True","return","False","examples","=","[","]","example_id","=","0","for","data","in","tqdm","(","input_data",")",":","data","=","eval","(","data",")","options","=","data","[","'candidates'","]","for","context","in","data","[","'content'","]",":","context","=","context",".","replace","(","\"\u201c\"",",","\"\\\"\"",")",".","replace","(","\"\u201d\"",",","\"\\\"\"",")",".","replace","(","\"\u2014\u2014\"",",","\"--\"",")",".","replace","(","\"\u2014\"",",","\"-\"",")",".","replace","(","\"\u2015\"",",","\"-\"",")",".","replace","(","\"\u2026\"",",","\"...\"",")",".","replace","(","\"\u2018\"",",","\"\\'\"",")",".","replace","(","\"\u2019\"",",","\"\\'\"",")","context","=","_tokenize_chinese_chars","(","context",")","paragraph_text","=","context",".","strip","(",")","doc_tokens","=","[","]","prev_is_whitespace","=","True","for","c","in","paragraph_text",":","if","is_whitespace","(","c",")",":","prev_is_whitespace","=","True","else",":","if","prev_is_whitespace",":","doc_tokens",".","append","(","c",")","else",":","doc_tokens","[","-","1","]","+=","c","prev_is_whitespace","=","False","tags","=","[","blank","for","blank","in","doc_tokens","if","'#idiom'","in","blank","]","if","is_training",":","for","tag_index",",","tag","in","enumerate","(","tags",")",":","answer_index","=","input_label","[","tag","]","example","=","ChidExample","(","example_id","=","example_id",",","tag","=","tag",",","doc_tokens","=","doc_tokens",",","options","=","options",",","answer_index","=","answer_index",")","examples",".","append","(","example",")","else",":","for","tag_index",",","tag","in","enumerate","(","tags",")",":","example","=","ChidExample","(","example_id","=","example_id",",","tag","=","tag",",","doc_tokens","=","doc_tokens",",","options","=","options",")","examples",".","append","(","example",")","else",":","example_id","+=","1","else",":","print","(","'\u539f\u59cb\u6837\u672c\u4e2a\u6570\uff1a{}'",".","format","(","example_id",")",")","print","(","'\u5b9e\u9645\u751f\u6210\u603b\u6837\u4f8b\u6570\uff1a{}'",".","format","(","len","(","examples",")",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/CHID_preprocess.py#L76-L193"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/CHID_preprocess.py","language":"python","identifier":"convert_examples_to_features","parameters":"(examples, tokenizer, max_seq_length=128, max_num_choices=10)","argument_list":"","return_statement":"return features","docstring":"\u5c06\u6240\u6709\u5019\u9009\u7b54\u6848\u653e\u7f6e\u5728\u7247\u6bb5\u5f00\u5934","docstring_summary":"\u5c06\u6240\u6709\u5019\u9009\u7b54\u6848\u653e\u7f6e\u5728\u7247\u6bb5\u5f00\u5934","docstring_tokens":["\u5c06\u6240\u6709\u5019\u9009\u7b54\u6848\u653e\u7f6e\u5728\u7247\u6bb5\u5f00\u5934"],"function":"def convert_examples_to_features(examples, tokenizer, max_seq_length=128, max_num_choices=10):\n '''\n \u5c06\u6240\u6709\u5019\u9009\u7b54\u6848\u653e\u7f6e\u5728\u7247\u6bb5\u5f00\u5934\n '''\n\n def _loop(example, unique_id, label):\n '''\n :param example:\n :param unique_id:\n :return:\n input_ids = (C, seq_len)\n token_type_ids = (C, seq_len) = segment_id\n input_mask = (C, seq_len)\n labels = int\n choices_mask = (C)\n '''\n input_ids = []\n input_masks = []\n segment_ids = []\n choice_masks = [1] * len(example.options)\n\n tag = example.tag\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n if '#idiom' in token:\n sub_tokens = [str(token)]\n else:\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n all_doc_tokens.append(sub_token)\n\n pos = all_doc_tokens.index(tag)\n num_tokens = max_tokens_for_doc - 5 # [unused1]\u548csegA\u7684\u6210\u8bed\n tmp_l, tmp_r = add_tokens_for_around(all_doc_tokens, pos, num_tokens)\n num_l = len(tmp_l)\n num_r = len(tmp_r)\n\n tokens_l = []\n for token in tmp_l:\n if '#idiom' in token and token != tag:\n tokens_l.extend(['[MASK]'] * 4)\n else:\n tokens_l.append(token)\n tokens_l = tokens_l[-num_l:]\n del tmp_l\n\n tokens_r = []\n 
for token in tmp_r:\n if '#idiom' in token and token != tag:\n tokens_r.extend(['[MASK]'] * 4)\n else:\n tokens_r.append(token)\n tokens_r = tokens_r[: num_r]\n del tmp_r\n\n for i, elem in enumerate(example.options):\n option = tokenizer.tokenize(elem)\n tokens = ['[CLS]'] + option + ['[SEP]'] + tokens_l + ['[unused1]'] + tokens_r + ['[SEP]']\n\n input_id = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_id)\n segment_id = [0] * len(input_id)\n\n while len(input_id) < max_seq_length:\n input_id.append(0)\n input_mask.append(0)\n segment_id.append(0)\n assert len(input_id) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_id) == max_seq_length\n\n input_ids.append(input_id)\n input_masks.append(input_mask)\n segment_ids.append(segment_id)\n\n if unique_id < 5:\n print(\"*** Example ***\")\n print(\"unique_id: {}\".format(unique_id))\n print(\"context_id: {}\".format(tag))\n print(\"label: {}\".format(label))\n print(\"tag_index: {}\".format(pos))\n print(\"tokens: {}\".format(\"\".join(tokens)))\n print(\"choice_masks: {}\".format(choice_masks))\n while len(input_ids) < max_num_choices:\n input_ids.append([0] * max_seq_length)\n input_masks.append([0] * max_seq_length)\n segment_ids.append([0] * max_seq_length)\n choice_masks.append(0)\n assert len(input_ids) == max_num_choices\n assert len(input_masks) == max_num_choices\n assert len(segment_ids) == max_num_choices\n assert len(choice_masks) == max_num_choices\n\n features.append(\n InputFeatures(\n unique_id=unique_id,\n example_id=example.example_id,\n tag=tag,\n tokens=tokens,\n input_ids=input_ids,\n input_masks=input_masks,\n segment_ids=segment_ids,\n choice_masks=choice_masks,\n label=label))\n\n max_tokens_for_doc = max_seq_length - 3 # [CLS] choice [SEP] document [SEP]\n features = []\n unique_id = 0\n\n for (example_index, example) in enumerate(tqdm(examples)):\n\n label = example.answer_index\n if label != None:\n _loop(example, unique_id, label)\n else:\n _loop(example, unique_id, None)\n unique_id += 1\n\n if unique_id % 12000 == 0:\n print(\"unique_id: %s\" % (unique_id))\n print(\"unique_id: %s\" % (unique_id))\n return features","function_tokens":["def","convert_examples_to_features","(","examples",",","tokenizer",",","max_seq_length","=","128",",","max_num_choices","=","10",")",":","def","_loop","(","example",",","unique_id",",","label",")",":","'''\n :param example:\n :param unique_id:\n :return:\n input_ids = (C, seq_len)\n token_type_ids = (C, seq_len) = segment_id\n input_mask = (C, seq_len)\n labels = int\n choices_mask = (C)\n '''","input_ids","=","[","]","input_masks","=","[","]","segment_ids","=","[","]","choice_masks","=","[","1","]","*","len","(","example",".","options",")","tag","=","example",".","tag","all_doc_tokens","=","[","]","for","(","i",",","token",")","in","enumerate","(","example",".","doc_tokens",")",":","if","'#idiom'","in","token",":","sub_tokens","=","[","str","(","token",")","]","else",":","sub_tokens","=","tokenizer",".","tokenize","(","token",")","for","sub_token","in","sub_tokens",":","all_doc_tokens",".","append","(","sub_token",")","pos","=","all_doc_tokens",".","index","(","tag",")","num_tokens","=","max_tokens_for_doc","-","5","# 
[unused1]\u548csegA\u7684\u6210\u8bed","tmp_l",",","tmp_r","=","add_tokens_for_around","(","all_doc_tokens",",","pos",",","num_tokens",")","num_l","=","len","(","tmp_l",")","num_r","=","len","(","tmp_r",")","tokens_l","=","[","]","for","token","in","tmp_l",":","if","'#idiom'","in","token","and","token","!=","tag",":","tokens_l",".","extend","(","[","'[MASK]'","]","*","4",")","else",":","tokens_l",".","append","(","token",")","tokens_l","=","tokens_l","[","-","num_l",":","]","del","tmp_l","tokens_r","=","[","]","for","token","in","tmp_r",":","if","'#idiom'","in","token","and","token","!=","tag",":","tokens_r",".","extend","(","[","'[MASK]'","]","*","4",")","else",":","tokens_r",".","append","(","token",")","tokens_r","=","tokens_r","[",":","num_r","]","del","tmp_r","for","i",",","elem","in","enumerate","(","example",".","options",")",":","option","=","tokenizer",".","tokenize","(","elem",")","tokens","=","[","'[CLS]'","]","+","option","+","[","'[SEP]'","]","+","tokens_l","+","[","'[unused1]'","]","+","tokens_r","+","[","'[SEP]'","]","input_id","=","tokenizer",".","convert_tokens_to_ids","(","tokens",")","input_mask","=","[","1","]","*","len","(","input_id",")","segment_id","=","[","0","]","*","len","(","input_id",")","while","len","(","input_id",")","<","max_seq_length",":","input_id",".","append","(","0",")","input_mask",".","append","(","0",")","segment_id",".","append","(","0",")","assert","len","(","input_id",")","==","max_seq_length","assert","len","(","input_mask",")","==","max_seq_length","assert","len","(","segment_id",")","==","max_seq_length","input_ids",".","append","(","input_id",")","input_masks",".","append","(","input_mask",")","segment_ids",".","append","(","segment_id",")","if","unique_id","<","5",":","print","(","\"*** Example ***\"",")","print","(","\"unique_id: {}\"",".","format","(","unique_id",")",")","print","(","\"context_id: {}\"",".","format","(","tag",")",")","print","(","\"label: {}\"",".","format","(","label",")",")","print","(","\"tag_index: {}\"",".","format","(","pos",")",")","print","(","\"tokens: {}\"",".","format","(","\"\"",".","join","(","tokens",")",")",")","print","(","\"choice_masks: {}\"",".","format","(","choice_masks",")",")","while","len","(","input_ids",")","<","max_num_choices",":","input_ids",".","append","(","[","0","]","*","max_seq_length",")","input_masks",".","append","(","[","0","]","*","max_seq_length",")","segment_ids",".","append","(","[","0","]","*","max_seq_length",")","choice_masks",".","append","(","0",")","assert","len","(","input_ids",")","==","max_num_choices","assert","len","(","input_masks",")","==","max_num_choices","assert","len","(","segment_ids",")","==","max_num_choices","assert","len","(","choice_masks",")","==","max_num_choices","features",".","append","(","InputFeatures","(","unique_id","=","unique_id",",","example_id","=","example",".","example_id",",","tag","=","tag",",","tokens","=","tokens",",","input_ids","=","input_ids",",","input_masks","=","input_masks",",","segment_ids","=","segment_ids",",","choice_masks","=","choice_masks",",","label","=","label",")",")","max_tokens_for_doc","=","max_seq_length","-","3","# [CLS] choice [SEP] document 
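The convert_examples_to_features record above encodes each candidate idiom as its own sequence: candidate in segment A, the current blank marked by [unused1], and every other blank in the window replaced by four [MASK] tokens (an idiom is four characters). A minimal sketch with illustrative helper names:

def mask_other_blanks(tokens, current_tag):
    # Any other #idiom placeholder in the window becomes four [MASK] tokens,
    # so only the blank being classified keeps its special marker.
    out = []
    for tok in tokens:
        if '#idiom' in tok and tok != current_tag:
            out.extend(['[MASK]'] * 4)
        else:
            out.append(tok)
    return out

def build_choice_tokens(option_tokens, tokens_l, tokens_r):
    # One encoded sequence per candidate; [unused1] marks the blank position.
    return ['[CLS]'] + option_tokens + ['[SEP]'] + tokens_l + ['[unused1]'] + tokens_r + ['[SEP]']

print(mask_other_blanks(['\u5929', '#idiom000001#', '\u5730'], '#idiom000002#'))
# ['天', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '地']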
[SEP]","features","=","[","]","unique_id","=","0","for","(","example_index",",","example",")","in","enumerate","(","tqdm","(","examples",")",")",":","label","=","example",".","answer_index","if","label","!=","None",":","_loop","(","example",",","unique_id",",","label",")","else",":","_loop","(","example",",","unique_id",",","None",")","unique_id","+=","1","if","unique_id","%","12000","==","0",":","print","(","\"unique_id: %s\"","%","(","unique_id",")",")","print","(","\"unique_id: %s\"","%","(","unique_id",")",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/CHID_preprocess.py#L217-L338"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/preprocess\/CHID_preprocess.py","language":"python","identifier":"logits_matrix_to_array","parameters":"(logits_matrix, index_2_idiom)","argument_list":"","return_statement":"return results","docstring":"\u4ece\u77e9\u9635\u4e2d\u8ba1\u7b97\u5168\u5c40\u6982\u7387\u6700\u5927\u7684\u5e8f\u5217","docstring_summary":"\u4ece\u77e9\u9635\u4e2d\u8ba1\u7b97\u5168\u5c40\u6982\u7387\u6700\u5927\u7684\u5e8f\u5217","docstring_tokens":["\u4ece\u77e9\u9635\u4e2d\u8ba1\u7b97\u5168\u5c40\u6982\u7387\u6700\u5927\u7684\u5e8f\u5217"],"function":"def logits_matrix_to_array(logits_matrix, index_2_idiom):\n \"\"\"\u4ece\u77e9\u9635\u4e2d\u8ba1\u7b97\u5168\u5c40\u6982\u7387\u6700\u5927\u7684\u5e8f\u5217\"\"\"\n logits_matrix = np.array(logits_matrix)\n logits_matrix = np.transpose(logits_matrix)\n tmp = []\n for i, row in enumerate(logits_matrix):\n for j, col in enumerate(row):\n tmp.append((i, j, col))\n else:\n choice = set(range(i + 1))\n blanks = set(range(j + 1))\n tmp = sorted(tmp, key=lambda x: x[2], reverse=True)\n results = []\n for i, j, v in tmp:\n if (j in blanks) and (i in choice):\n results.append((i, j))\n blanks.remove(j)\n choice.remove(i)\n results = sorted(results, key=lambda x: x[1], reverse=False)\n results = [[index_2_idiom[j], i] for i, j in results]\n return results","function_tokens":["def","logits_matrix_to_array","(","logits_matrix",",","index_2_idiom",")",":","logits_matrix","=","np",".","array","(","logits_matrix",")","logits_matrix","=","np",".","transpose","(","logits_matrix",")","tmp","=","[","]","for","i",",","row","in","enumerate","(","logits_matrix",")",":","for","j",",","col","in","enumerate","(","row",")",":","tmp",".","append","(","(","i",",","j",",","col",")",")","else",":","choice","=","set","(","range","(","i","+","1",")",")","blanks","=","set","(","range","(","j","+","1",")",")","tmp","=","sorted","(","tmp",",","key","=","lambda","x",":","x","[","2","]",",","reverse","=","True",")","results","=","[","]","for","i",",","j",",","v","in","tmp",":","if","(","j","in","blanks",")","and","(","i","in","choice",")",":","results",".","append","(","(","i",",","j",")",")","blanks",".","remove","(","j",")","choice",".","remove","(","i",")","results","=","sorted","(","results",",","key","=","lambda","x",":","x","[","1","]",",","reverse","=","False",")","results","=","[","[","index_2_idiom","[","j","]",",","i","]","for","i",",","j","in","results","]","return","results"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/preprocess\/CHID_preprocess.py#L341-L361"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/pytorch_optimization.py","language":"python","identifier":"BERTAdam.step","parameters":"(self, closure=None)","argument_list":"","return_statement":"return loss","docstring":"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.","docstring_summary":"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.","docstring_tokens":["Performs","a","single","optimization","step",".","Arguments",":","closure","(","callable","optional",")",":","A","closure","that","reevaluates","the","model","and","returns","the","loss","."],"function":"def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(1 - beta1, grad)\n next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n update = next_m \/ (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m\/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay_rate'] > 0.0:\n update += group['weight_decay_rate'] * p.data\n\n schedule_fct = SCHEDULES[group['schedule']]\n if group['cycle_step'] is not None and state['step'] > group['cycle_step']:\n lr_scheduled = group['lr'] * (1 - ((state['step'] % group['cycle_step']) \/ group['cycle_step']))\n elif group['t_total'] != -1 and group['schedule'] != 'warmup_fix':\n lr_scheduled = group['lr'] * schedule_fct(state['step'] \/ group['t_total'], group['warmup'])\n elif group['schedule'] == 'warmup_fix':\n lr_scheduled = group['lr'] * schedule_fct(state['step'], group['warmup'] * group['t_total'])\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n return loss","function_tokens":["def","step","(","self",",","closure","=","None",")",":","loss","=","None","if","closure","is","not","None",":","loss","=","closure","(",")","for","group","in","self",".","param_groups",":","for","p","in","group","[","'params'","]",":","if","p",".","grad","is","None",":","continue","grad","=","p",".","grad",".","data","if","grad",".","is_sparse",":","raise","RuntimeError","(","'Adam does not support sparse gradients, please consider SparseAdam instead'",")","state","=","self",".","state","[","p","]","# State initialization","if","len","(","state",")","==","0",":","state","[","'step'","]","=","0","# Exponential moving average of gradient values","state","[","'next_m'","]","=","torch",".","zeros_like","(","p",".","data",")","# Exponential moving average of squared gradient values","state","[","'next_v'","]","=","torch",".","zeros_like","(","p",".","data",")","next_m",",","next_v","=","state","[","'next_m'","]",",","state","[","'next_v'","]","beta1",",","beta2","=","group","[","'b1'","]",",","group","[","'b2'","]","# Add grad clipping","if","group","[","'max_grad_norm'","]",">","0",":","clip_grad_norm_","(","p",",","group","[","'max_grad_norm'","]",")","# Decay the first and second moment running average coefficient","# In-place operations to update the averages at the same time","next_m",".","mul_","(","beta1",")",".","add_","(","1","-","beta1",",","grad",")","next_v",".","mul_","(","beta2",")",".","addcmul_","(","1","-","beta2",",","grad",",","grad",")","update","=","next_m","\/","(","next_v",".","sqrt","(",")","+","group","[","'e'","]",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want ot decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
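The BERTAdam.step record above applies weight decay decoupled from the Adam moments: the decay term is added to the update directly instead of to the gradient, and (unlike standard Adam) no bias correction is applied. A numpy restatement of the per-parameter update math, with assumed default hyperparameters:

import numpy as np

def bert_adam_update(p, grad, m, v, lr, b1=0.9, b2=0.999, eps=1e-6, wd=0.01):
    # Moment estimates see only the raw gradient...
    m = b1 * m + (1 - b1) * grad
    v = b2 * v + (1 - b2) * grad * grad
    # ...while the decay term wd * p is added to the update afterwards, so it
    # never flows through m/v (this is what makes the decay "decoupled").
    update = m / (np.sqrt(v) + eps) + wd * p
    return p - lr * update, m, v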
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","if","group","[","'weight_decay_rate'","]",">","0.0",":","update","+=","group","[","'weight_decay_rate'","]","*","p",".","data","schedule_fct","=","SCHEDULES","[","group","[","'schedule'","]","]","if","group","[","'cycle_step'","]","is","not","None","and","state","[","'step'","]",">","group","[","'cycle_step'","]",":","lr_scheduled","=","group","[","'lr'","]","*","(","1","-","(","(","state","[","'step'","]","%","group","[","'cycle_step'","]",")","\/","group","[","'cycle_step'","]",")",")","elif","group","[","'t_total'","]","!=","-","1","and","group","[","'schedule'","]","!=","'warmup_fix'",":","lr_scheduled","=","group","[","'lr'","]","*","schedule_fct","(","state","[","'step'","]","\/","group","[","'t_total'","]",",","group","[","'warmup'","]",")","elif","group","[","'schedule'","]","==","'warmup_fix'",":","lr_scheduled","=","group","[","'lr'","]","*","schedule_fct","(","state","[","'step'","]",",","group","[","'warmup'","]","*","group","[","'t_total'","]",")","else",":","lr_scheduled","=","group","[","'lr'","]","update_with_lr","=","lr_scheduled","*","update","p",".","data",".","add_","(","-","update_with_lr",")","state","[","'step'","]","+=","1","return","loss"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/pytorch_optimization.py#L89-L155"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"url_to_filename","parameters":"(url: str, etag: str = None)","argument_list":"","return_statement":"return filename","docstring":"Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.","docstring_summary":"Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.","docstring_tokens":["Convert","url","into","a","hashed","filename","in","a","repeatable","way",".","If","etag","is","specified","append","its","hash","to","the","url","s","delimited","by","a","period","."],"function":"def url_to_filename(url: str, etag: str = None) -> str:\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n \"\"\"\n url_bytes = url.encode('utf-8')\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()\n\n if etag:\n etag_bytes = etag.encode('utf-8')\n etag_hash = sha256(etag_bytes)\n filename += '.' 
+ etag_hash.hexdigest()\n\n return filename","function_tokens":["def","url_to_filename","(","url",":","str",",","etag",":","str","=","None",")","->","str",":","url_bytes","=","url",".","encode","(","'utf-8'",")","url_hash","=","sha256","(","url_bytes",")","filename","=","url_hash",".","hexdigest","(",")","if","etag",":","etag_bytes","=","etag",".","encode","(","'utf-8'",")","etag_hash","=","sha256","(","etag_bytes",")","filename","+=","'.'","+","etag_hash",".","hexdigest","(",")","return","filename"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L29-L44"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"filename_to_url","parameters":"(filename: str, cache_dir: Union[str, Path] = None)","argument_list":"","return_statement":"return url, etag","docstring":"Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.","docstring_summary":"Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.","docstring_tokens":["Return","the","url","and","etag","(","which","may","be","None",")","stored","for","filename",".","Raise","FileNotFoundError","if","filename","or","its","stored","metadata","do","not","exist","."],"function":"def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:\n \"\"\"\n Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n cache_path = os.path.join(cache_dir, filename)\n if not os.path.exists(cache_path):\n raise FileNotFoundError(\"file {} not found\".format(cache_path))\n\n meta_path = cache_path + '.json'\n if not os.path.exists(meta_path):\n raise FileNotFoundError(\"file {} not found\".format(meta_path))\n\n with open(meta_path) as meta_file:\n metadata = json.load(meta_file)\n url = metadata['url']\n etag = metadata['etag']\n\n return url, etag","function_tokens":["def","filename_to_url","(","filename",":","str",",","cache_dir",":","Union","[","str",",","Path","]","=","None",")","->","Tuple","[","str",",","str","]",":","if","cache_dir","is","None",":","cache_dir","=","PYTORCH_PRETRAINED_BERT_CACHE","if","isinstance","(","cache_dir",",","Path",")",":","cache_dir","=","str","(","cache_dir",")","cache_path","=","os",".","path",".","join","(","cache_dir",",","filename",")","if","not","os",".","path",".","exists","(","cache_path",")",":","raise","FileNotFoundError","(","\"file {} not found\"",".","format","(","cache_path",")",")","meta_path","=","cache_path","+","'.json'","if","not","os",".","path",".","exists","(","meta_path",")",":","raise","FileNotFoundError","(","\"file {} not 
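The url_to_filename record above derives cache filenames purely from hashes, so the same URL always maps to the same cache entry and a changed ETag yields a fresh one. A short usage sketch (URL and ETag values are made up):

from hashlib import sha256

url = "https://example.com/vocab.txt"  # hypothetical URL
etag = '"abc123"'                      # hypothetical ETag header value
name = sha256(url.encode("utf-8")).hexdigest()
if etag:
    name += "." + sha256(etag.encode("utf-8")).hexdigest()
# name is '<64 hex chars>.<64 hex chars>'; re-downloading after the server
# changes the ETag produces a different filename, i.e. a new cache entry.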
found\"",".","format","(","meta_path",")",")","with","open","(","meta_path",")","as","meta_file",":","metadata","=","json",".","load","(","meta_file",")","url","=","metadata","[","'url'","]","etag","=","metadata","[","'etag'","]","return","url",",","etag"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L47-L70"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"cached_path","parameters":"(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None)","argument_list":"","return_statement":"","docstring":"Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. If it's already a local path,\n make sure the file exists and then return the path.","docstring_summary":"Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. If it's already a local path,\n make sure the file exists and then return the path.","docstring_tokens":["Given","something","that","might","be","a","URL","(","or","might","be","a","local","path",")","determine","which",".","If","it","s","a","URL","download","the","file","and","cache","it","and","return","the","path","to","the","cached","file",".","If","it","s","already","a","local","path","make","sure","the","file","exists","and","then","return","the","path","."],"function":"def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. 
If it's already a local path,\n make sure the file exists and then return the path.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise FileNotFoundError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))","function_tokens":["def","cached_path","(","url_or_filename",":","Union","[","str",",","Path","]",",","cache_dir",":","Union","[","str",",","Path","]","=","None",")","->","str",":","if","cache_dir","is","None",":","cache_dir","=","PYTORCH_PRETRAINED_BERT_CACHE","if","isinstance","(","url_or_filename",",","Path",")",":","url_or_filename","=","str","(","url_or_filename",")","if","isinstance","(","cache_dir",",","Path",")",":","cache_dir","=","str","(","cache_dir",")","parsed","=","urlparse","(","url_or_filename",")","if","parsed",".","scheme","in","(","'http'",",","'https'",",","'s3'",")",":","# URL, so get it from the cache (downloading if necessary)","return","get_from_cache","(","url_or_filename",",","cache_dir",")","elif","os",".","path",".","exists","(","url_or_filename",")",":","# File, and it exists.","return","url_or_filename","elif","parsed",".","scheme","==","''",":","# File, but it doesn't exist.","raise","FileNotFoundError","(","\"file {} not found\"",".","format","(","url_or_filename",")",")","else",":","# Something unknown","raise","ValueError","(","\"unable to parse {} as a URL or as a local path\"",".","format","(","url_or_filename",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L73-L100"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"split_s3_path","parameters":"(url: str)","argument_list":"","return_statement":"return bucket_name, s3_path","docstring":"Split a full s3 path into the bucket name and path.","docstring_summary":"Split a full s3 path into the bucket name and path.","docstring_tokens":["Split","a","full","s3","path","into","the","bucket","name","and","path","."],"function":"def split_s3_path(url: str) -> Tuple[str, str]:\n \"\"\"Split a full s3 path into the bucket name and path.\"\"\"\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '\/' at beginning of path.\n if s3_path.startswith(\"\/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path","function_tokens":["def","split_s3_path","(","url",":","str",")","->","Tuple","[","str",",","str","]",":","parsed","=","urlparse","(","url",")","if","not","parsed",".","netloc","or","not","parsed",".","path",":","raise","ValueError","(","\"bad s3 path {}\"",".","format","(","url",")",")","bucket_name","=","parsed",".","netloc","s3_path","=","parsed",".","path","# Remove '\/' at beginning of 
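The cached_path record above dispatches purely on the parsed URL scheme: http/https/s3 go through the download cache, an empty scheme is treated as a local path. A toy sketch of that dispatch (return values are labels, not real paths):

from urllib.parse import urlparse

def resolve(url_or_filename):
    # Mirror of the scheme dispatch in cached_path, with stand-in results.
    scheme = urlparse(str(url_or_filename)).scheme
    if scheme in ('http', 'https', 's3'):
        return 'download-and-cache'
    if scheme == '':
        return 'local-path'
    return 'unsupported'

print(resolve('https://example.com/model.bin'), resolve('/tmp/model.bin'))
# download-and-cache local-path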
path.","if","s3_path",".","startswith","(","\"\/\"",")",":","s3_path","=","s3_path","[","1",":","]","return","bucket_name",",","s3_path"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L103-L113"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"s3_request","parameters":"(func: Callable)","argument_list":"","return_statement":"return wrapper","docstring":"Wrapper function for s3 requests in order to create more helpful error\n messages.","docstring_summary":"Wrapper function for s3 requests in order to create more helpful error\n messages.","docstring_tokens":["Wrapper","function","for","s3","requests","in","order","to","create","more","helpful","error","messages","."],"function":"def s3_request(func: Callable):\n \"\"\"\n Wrapper function for s3 requests in order to create more helpful error\n messages.\n \"\"\"\n\n @wraps(func)\n def wrapper(url: str, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise FileNotFoundError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper","function_tokens":["def","s3_request","(","func",":","Callable",")",":","@","wraps","(","func",")","def","wrapper","(","url",":","str",",","*","args",",","*","*","kwargs",")",":","try",":","return","func","(","url",",","*","args",",","*","*","kwargs",")","except","ClientError","as","exc",":","if","int","(","exc",".","response","[","\"Error\"","]","[","\"Code\"","]",")","==","404",":","raise","FileNotFoundError","(","\"file {} not found\"",".","format","(","url",")",")","else",":","raise","return","wrapper"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L116-L132"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"s3_etag","parameters":"(url: str)","argument_list":"","return_statement":"return s3_object.e_tag","docstring":"Check ETag on S3 object.","docstring_summary":"Check ETag on S3 object.","docstring_tokens":["Check","ETag","on","S3","object","."],"function":"def s3_etag(url: str) -> Optional[str]:\n \"\"\"Check ETag on S3 object.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag","function_tokens":["def","s3_etag","(","url",":","str",")","->","Optional","[","str","]",":","s3_resource","=","boto3",".","resource","(","\"s3\"",")","bucket_name",",","s3_path","=","split_s3_path","(","url",")","s3_object","=","s3_resource",".","Object","(","bucket_name",",","s3_path",")","return","s3_object",".","e_tag"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L136-L141"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"s3_get","parameters":"(url: str, temp_file: IO)","argument_list":"","return_statement":"","docstring":"Pull a file directly from S3.","docstring_summary":"Pull a 
file directly from S3.","docstring_tokens":["Pull","a","file","directly","from","S3","."],"function":"def s3_get(url: str, temp_file: IO) -> None:\n \"\"\"Pull a file directly from S3.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)","function_tokens":["def","s3_get","(","url",":","str",",","temp_file",":","IO",")","->","None",":","s3_resource","=","boto3",".","resource","(","\"s3\"",")","bucket_name",",","s3_path","=","split_s3_path","(","url",")","s3_resource",".","Bucket","(","bucket_name",")",".","download_fileobj","(","s3_path",",","temp_file",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L145-L149"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"get_from_cache","parameters":"(url: str, cache_dir: Union[str, Path] = None)","argument_list":"","return_statement":"return cache_path","docstring":"Given a URL, look for the corresponding dataset in the local cache.\n If it's not there, download it. Then return the path to the cached file.","docstring_summary":"Given a URL, look for the corresponding dataset in the local cache.\n If it's not there, download it. Then return the path to the cached file.","docstring_tokens":["Given","a","URL","look","for","the","corresponding","dataset","in","the","local","cache",".","If","it","s","not","there","download","it",".","Then","return","the","path","to","the","cached","file","."],"function":"def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:\n \"\"\"\n Given a URL, look for the corresponding dataset in the local cache.\n If it's not there, download it. 
Then return the path to the cached file.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n os.makedirs(cache_dir, exist_ok=True)\n\n # Get eTag to add to filename, if it exists.\n if url.startswith(\"s3:\/\/\"):\n etag = s3_etag(url)\n else:\n response = requests.head(url, allow_redirects=True)\n if response.status_code != 200:\n raise IOError(\"HEAD request failed for url {} with status code {}\"\n .format(url, response.status_code))\n etag = response.headers.get(\"ETag\")\n\n filename = url_to_filename(url, etag)\n\n # get cache path to put the file\n cache_path = os.path.join(cache_dir, filename)\n\n if not os.path.exists(cache_path):\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with tempfile.NamedTemporaryFile() as temp_file:\n logger.info(\"%s not found in cache, downloading to %s\", url, temp_file.name)\n\n # GET file object\n if url.startswith(\"s3:\/\/\"):\n s3_get(url, temp_file)\n else:\n http_get(url, temp_file)\n\n # we are copying the file before closing it, so flush to avoid truncation\n temp_file.flush()\n # shutil.copyfileobj() starts at the current position, so go to the start\n temp_file.seek(0)\n\n logger.info(\"copying %s to cache at %s\", temp_file.name, cache_path)\n with open(cache_path, 'wb') as cache_file:\n shutil.copyfileobj(temp_file, cache_file)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {'url': url, 'etag': etag}\n meta_path = cache_path + '.json'\n with open(meta_path, 'w') as meta_file:\n json.dump(meta, meta_file)\n\n logger.info(\"removing temp file %s\", temp_file.name)\n\n return cache_path","function_tokens":["def","get_from_cache","(","url",":","str",",","cache_dir",":","Union","[","str",",","Path","]","=","None",")","->","str",":","if","cache_dir","is","None",":","cache_dir","=","PYTORCH_PRETRAINED_BERT_CACHE","if","isinstance","(","cache_dir",",","Path",")",":","cache_dir","=","str","(","cache_dir",")","os",".","makedirs","(","cache_dir",",","exist_ok","=","True",")","# Get eTag to add to filename, if it exists.","if","url",".","startswith","(","\"s3:\/\/\"",")",":","etag","=","s3_etag","(","url",")","else",":","response","=","requests",".","head","(","url",",","allow_redirects","=","True",")","if","response",".","status_code","!=","200",":","raise","IOError","(","\"HEAD request failed for url {} with status code {}\"",".","format","(","url",",","response",".","status_code",")",")","etag","=","response",".","headers",".","get","(","\"ETag\"",")","filename","=","url_to_filename","(","url",",","etag",")","# get cache path to put the file","cache_path","=","os",".","path",".","join","(","cache_dir",",","filename",")","if","not","os",".","path",".","exists","(","cache_path",")",":","# Download to temporary file, then copy to cache dir once finished.","# Otherwise you get corrupt cache entries if the download gets interrupted.","with","tempfile",".","NamedTemporaryFile","(",")","as","temp_file",":","logger",".","info","(","\"%s not found in cache, downloading to %s\"",",","url",",","temp_file",".","name",")","# GET file object","if","url",".","startswith","(","\"s3:\/\/\"",")",":","s3_get","(","url",",","temp_file",")","else",":","http_get","(","url",",","temp_file",")","# we are copying the file before closing it, so flush to avoid truncation","temp_file",".","flush","(",")","# shutil.copyfileobj() starts at the current position, 
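The get_from_cache record above never writes directly to the cache path: it streams into a temporary file first, so an interrupted download cannot leave a truncated cache entry. A minimal sketch of that pattern (fetch stands in for the record's http_get/s3_get callables):

import shutil
import tempfile

def download_then_cache(fetch, cache_path):
    # Stream into a NamedTemporaryFile, then copy into place once complete.
    with tempfile.NamedTemporaryFile() as tmp:
        fetch(tmp)       # e.g. s3_get(url, tmp) or http_get(url, tmp)
        tmp.flush()      # push buffered bytes to the OS before copying
        tmp.seek(0)      # copyfileobj reads from the current offset
        with open(cache_path, "wb") as out:
            shutil.copyfileobj(tmp, out)
    return cache_path

# download_then_cache(lambda f: f.write(b"weights"), "/tmp/model.bin")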
so go to the start","temp_file",".","seek","(","0",")","logger",".","info","(","\"copying %s to cache at %s\"",",","temp_file",".","name",",","cache_path",")","with","open","(","cache_path",",","'wb'",")","as","cache_file",":","shutil",".","copyfileobj","(","temp_file",",","cache_file",")","logger",".","info","(","\"creating metadata file for %s\"",",","cache_path",")","meta","=","{","'url'",":","url",",","'etag'",":","etag","}","meta_path","=","cache_path","+","'.json'","with","open","(","meta_path",",","'w'",")","as","meta_file",":","json",".","dump","(","meta",",","meta_file",")","logger",".","info","(","\"removing temp file %s\"",",","temp_file",".","name",")","return","cache_path"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L164-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py","language":"python","identifier":"read_set_from_file","parameters":"(filename: str)","argument_list":"","return_statement":"return collection","docstring":"Extract a de-duped collection (set) of text from a file.\n Expected file format is one item per line.","docstring_summary":"Extract a de-duped collection (set) of text from a file.\n Expected file format is one item per line.","docstring_tokens":["Extract","a","de","-","duped","collection","(","set",")","of","text","from","a","file",".","Expected","file","format","is","one","item","per","line","."],"function":"def read_set_from_file(filename: str) -> Set[str]:\n '''\n Extract a de-duped collection (set) of text from a file.\n Expected file format is one item per line.\n '''\n collection = set()\n with open(filename, 'r', encoding='utf-8') as file_:\n for line in file_:\n collection.add(line.rstrip())\n return collection","function_tokens":["def","read_set_from_file","(","filename",":","str",")","->","Set","[","str","]",":","collection","=","set","(",")","with","open","(","filename",",","'r'",",","encoding","=","'utf-8'",")","as","file_",":","for","line","in","file_",":","collection",".","add","(","line",".","rstrip","(",")",")","return","collection"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/file_utils.py#L223-L232"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\n while True:\n token = reader.readline()\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return 
vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","index","=","0","with","open","(","vocab_file",",","\"r\"",",","encoding","=","\"utf-8\"",")","as","reader",":","while","True",":","token","=","reader",".","readline","(",")","if","not","token",":","break","token","=","token",".","strip","(",")","vocab","[","token","]","=","index","index","+=","1","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L43-L55"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a peice of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a peice of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","peice","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a peice of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L58-L64"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"printable_text","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_summary":"Returns text encoded in a way suitable for print or `tf.logging`.","docstring_tokens":["Returns","text","encoded","in","a","way","suitable","for","print","or","tf",".","logging","."],"function":"def printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","printable_text","(","text",")",":","# These functions want `str` for both Python2 and Python3, but in one case","# it's a Unicode string and in the other it's a byte string.","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: 
%s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","unicode",")",":","return","text",".","encode","(","\"utf-8\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L67-L87"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"convert_to_unicode","parameters":"(text)","argument_list":"","return_statement":"","docstring":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_summary":"Converts `text` to Unicode (if it's not already), assuming utf-8 input.","docstring_tokens":["Converts","text","to","Unicode","(","if","it","s","not","already",")","assuming","utf","-","8","input","."],"function":"def convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")","function_tokens":["def","convert_to_unicode","(","text",")",":","if","six",".","PY3",":","if","isinstance","(","text",",","str",")",":","return","text","elif","isinstance","(","text",",","bytes",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","elif","six",".","PY2",":","if","isinstance","(","text",",","str",")",":","return","text",".","decode","(","\"utf-8\"",",","\"ignore\"",")","elif","isinstance","(","text",",","unicode",")",":","return","text","else",":","raise","ValueError","(","\"Unsupported string type: %s\"","%","(","type","(","text",")",")",")","else",":","raise","ValueError","(","\"Not running on Python2 or Python 3?\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L90-L107"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or 
char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically contorl characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L351-L360"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"C\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L363-L372"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# 
consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L375-L388"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BertTokenizer.convert_tokens_to_ids","parameters":"(self, tokens)","argument_list":"","return_statement":"return ids","docstring":"Converts a sequence of tokens into ids using the vocab.","docstring_summary":"Converts a sequence of tokens into ids using the vocab.","docstring_tokens":["Converts","a","sequence","of","tokens","into","ids","using","the","vocab","."],"function":"def convert_tokens_to_ids(self, tokens):\n \"\"\"Converts a sequence of tokens into ids using the vocab.\"\"\"\n ids = []\n for token in tokens:\n ids.append(self.vocab[token])\n return ids","function_tokens":["def","convert_tokens_to_ids","(","self",",","tokens",")",":","ids","=","[","]","for","token","in","tokens",":","ids",".","append","(","self",".","vocab","[","token","]",")","return","ids"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L131-L136"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BertTokenizer.convert_ids_to_tokens","parameters":"(self, ids)","argument_list":"","return_statement":"return tokens","docstring":"Converts a sequence of ids in wordpiece tokens using the vocab.","docstring_summary":"Converts a sequence of ids in wordpiece tokens using the vocab.","docstring_tokens":["Converts","a","sequence","of","ids","in","wordpiece","tokens","using","the","vocab","."],"function":"def convert_ids_to_tokens(self, ids):\n \"\"\"Converts a sequence of ids in wordpiece tokens using the vocab.\"\"\"\n tokens = []\n for i in ids:\n tokens.append(self.ids_to_tokens[i])\n return tokens","function_tokens":["def","convert_ids_to_tokens","(","self",",","ids",")",":","tokens","=","[","]","for","i","in","ids",":","tokens",".","append","(","self",".","ids_to_tokens","[","i","]",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L138-L143"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BertTokenizer.from_pretrained","parameters":"(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs)","argument_list":"","return_statement":"return tokenizer","docstring":"Instantiate a PreTrainedBertModel from a pre-trained model file.\n Download and cache the pre-trained model file if needed.","docstring_summary":"Instantiate a PreTrainedBertModel from a pre-trained model file.\n Download and cache the pre-trained model file if 
needed.","docstring_tokens":["Instantiate","a","PreTrainedBertModel","from","a","pre","-","trained","model","file",".","Download","and","cache","the","pre","-","trained","model","file","if","needed","."],"function":"def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):\n \"\"\"\n Instantiate a PreTrainedBertModel from a pre-trained model file.\n Download and cache the pre-trained model file if needed.\n \"\"\"\n if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:\n vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]\n else:\n vocab_file = pretrained_model_name\n if os.path.isdir(vocab_file):\n vocab_file = os.path.join(vocab_file, VOCAB_NAME)\n # redirect to the cache, if necessary\n try:\n resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)\n except FileNotFoundError:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name,\n ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),\n vocab_file))\n return None\n if resolved_vocab_file == vocab_file:\n logger.info(\"loading vocabulary file {}\".format(vocab_file))\n else:\n logger.info(\"loading vocabulary file {} from cache at {}\".format(\n vocab_file, resolved_vocab_file))\n # Instantiate tokenizer.\n tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)\n return tokenizer","function_tokens":["def","from_pretrained","(","cls",",","pretrained_model_name",",","cache_dir","=","None",",","*","inputs",",","*","*","kwargs",")",":","if","pretrained_model_name","in","PRETRAINED_VOCAB_ARCHIVE_MAP",":","vocab_file","=","PRETRAINED_VOCAB_ARCHIVE_MAP","[","pretrained_model_name","]","else",":","vocab_file","=","pretrained_model_name","if","os",".","path",".","isdir","(","vocab_file",")",":","vocab_file","=","os",".","path",".","join","(","vocab_file",",","VOCAB_NAME",")","# redirect to the cache, if necessary","try",":","resolved_vocab_file","=","cached_path","(","vocab_file",",","cache_dir","=","cache_dir",")","except","FileNotFoundError",":","logger",".","error","(","\"Model name '{}' was not found in model name list ({}). 
\"","\"We assumed '{}' was a path or url but couldn't find any file \"","\"associated to this path or url.\"",".","format","(","pretrained_model_name",",","', '",".","join","(","PRETRAINED_VOCAB_ARCHIVE_MAP",".","keys","(",")",")",",","vocab_file",")",")","return","None","if","resolved_vocab_file","==","vocab_file",":","logger",".","info","(","\"loading vocabulary file {}\"",".","format","(","vocab_file",")",")","else",":","logger",".","info","(","\"loading vocabulary file {} from cache at {}\"",".","format","(","vocab_file",",","resolved_vocab_file",")",")","# Instantiate tokenizer.","tokenizer","=","cls","(","resolved_vocab_file",",","*","inputs",",","*","*","kwargs",")","return","tokenizer"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L146-L176"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",")",":","self",".","do_lower_case","=","do_lower_case"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L182-L188"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text.","docstring_summary":"Tokenizes a piece of text.","docstring_tokens":["Tokenizes","a","piece","of","text","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = self._clean_text(text)\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. 
This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L190-L209"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L211-L220"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text)","argument_list":"","return_statement":"return [\"\".join(x) for x in output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in 
output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",")",":","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L222-L240"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L242-L253"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like the all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L255-L275"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L277-L288"} 
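The BasicTokenizer records above describe a character-level pipeline: clean the text, pad CJK codepoints with spaces so each ideograph tokenizes alone, lowercase, strip combining accents (Unicode category "Mn"), and split punctuation off into its own tokens. Below is a minimal, self-contained sketch of that pipeline; the helper names echo the dataset's identifiers, but this is an illustrative re-implementation (with a simplified punctuation test that checks only Unicode category "P", not the extra ASCII ranges the real _is_punctuation covers), not the repository code.

import unicodedata

def _is_cjk(cp):
    # The main CJK Unified Ideographs ranges checked by _is_chinese_char above
    # (abbreviated here; the full function tests eight codepoint ranges).
    return (0x4E00 <= cp <= 0x9FFF or 0x3400 <= cp <= 0x4DBF
            or 0xF900 <= cp <= 0xFAFF or 0x20000 <= cp <= 0x2A6DF)

def basic_tokenize(text, do_lower_case=True):
    # Step 1: surround every CJK character with spaces so it becomes its own token.
    padded = []
    for ch in text:
        if _is_cjk(ord(ch)):
            padded.extend((" ", ch, " "))
        else:
            padded.append(ch)
    tokens = "".join(padded).split()

    # Step 2: lowercase, drop combining marks (NFD + category "Mn"),
    # and split punctuation characters into standalone tokens.
    output = []
    for tok in tokens:
        if do_lower_case:
            tok = "".join(c for c in unicodedata.normalize("NFD", tok.lower())
                          if unicodedata.category(c) != "Mn")
        buf = ""
        for c in tok:
            if unicodedata.category(c).startswith("P"):  # simplified _is_punctuation
                if buf:
                    output.append(buf)
                buf = ""
                output.append(c)
            else:
                buf += c
        if buf:
            output.append(buf)
    return output

print(basic_tokenize("Héllo, 中文BERT!"))  # -> ['hello', ',', '中', '文', 'bert', '!']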
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/official_tokenization.py#L299-L348"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/mrc_pytorch\/tools\/utils.py","language":"python","identifier":"get_assigment_map_from_checkpoint","parameters":"(tvars, 
init_checkpoint)","argument_list":"","return_statement":"return assignment_map, initialized_variable_names, new_variable_names, unused_variable_names","docstring":"Compute the union of the current variables and checkpoint variables.","docstring_summary":"Compute the union of the current variables and checkpoint variables.","docstring_tokens":["Compute","the","union","of","the","current","variables","and","checkpoint","variables","."],"function":"def get_assigment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n initialized_variable_names = {}\n new_variable_names = set()\n unused_variable_names = set()\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n if 'adam' not in name:\n unused_variable_names.add(name)\n continue\n # assignment_map[name] = name\n assignment_map[name] = name_to_variable[name]\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n for name in name_to_variable:\n if name not in initialized_variable_names:\n new_variable_names.add(name)\n return assignment_map, initialized_variable_names, new_variable_names, unused_variable_names","function_tokens":["def","get_assigment_map_from_checkpoint","(","tvars",",","init_checkpoint",")",":","initialized_variable_names","=","{","}","new_variable_names","=","set","(",")","unused_variable_names","=","set","(",")","name_to_variable","=","collections",".","OrderedDict","(",")","for","var","in","tvars",":","name","=","var",".","name","m","=","re",".","match","(","\"^(.*):\\\\d+$\"",",","name",")","if","m","is","not","None",":","name","=","m",".","group","(","1",")","name_to_variable","[","name","]","=","var","init_vars","=","tf",".","train",".","list_variables","(","init_checkpoint",")","assignment_map","=","collections",".","OrderedDict","(",")","for","x","in","init_vars",":","(","name",",","var",")","=","(","x","[","0","]",",","x","[","1","]",")","if","name","not","in","name_to_variable",":","if","'adam'","not","in","name",":","unused_variable_names",".","add","(","name",")","continue","# assignment_map[name] = name","assignment_map","[","name","]","=","name_to_variable","[","name","]","initialized_variable_names","[","name","]","=","1","initialized_variable_names","[","name","+","\":0\"","]","=","1","for","name","in","name_to_variable",":","if","name","not","in","initialized_variable_names",":","new_variable_names",".","add","(","name",")","return","assignment_map",",","initialized_variable_names",",","new_variable_names",",","unused_variable_names"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/mrc_pytorch\/tools\/utils.py#L46-L77"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/run_classifier.py","language":"python","identifier":"train","parameters":"(args, train_dataset, model, tokenizer)","argument_list":"","return_statement":"return global_step, tr_loss \/ global_step","docstring":"Train the model","docstring_summary":"Train the model","docstring_tokens":["Train","the","model"],"function":"def train(args, 
train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\"\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,\n collate_fn=xlnet_collate_fn if args.model_type in ['xlnet'] else collate_fn)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps \/\/ (len(train_dataloader) \/\/ args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) \/\/ args.gradient_accumulation_steps * args.num_train_epochs\n args.warmup_steps = int(t_total * args.warmup_proportion)\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https:\/\/www.github.com\/nvidia\/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (\n torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n seed_everything(args.seed) # Added here for reproductibility (even between python 2 and 3)\n for _ in range(int(args.num_train_epochs)):\n pbar = ProgressBar(n_total=len(train_dataloader), desc='Training')\n for step, batch in enumerate(train_dataloader):\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if args.model_type != 'distilbert':\n inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert',\n 'roberta'] else None # XLM, DistilBERT don't use segment_ids\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss \/ args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n pbar(step, {'loss': loss.item()})\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n print(\" \")\n # Log metrics\n if args.local_rank == -1: # Only evaluate when single GPU otherwise metrics may not average well\n evaluate(args, model, tokenizer)\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = model.module if hasattr(model,\n 'module') else model # Take care of distributed\/parallel training\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, 'training_args.bin'))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n tokenizer.save_vocabulary(vocab_path=output_dir)\n print(\" \")\n if 'cuda' in str(args.device):\n torch.cuda.empty_cache()\n return global_step, tr_loss \/ 
global_step","function_tokens":["def","train","(","args",",","train_dataset",",","model",",","tokenizer",")",":","args",".","train_batch_size","=","args",".","per_gpu_train_batch_size","*","max","(","1",",","args",".","n_gpu",")","train_sampler","=","RandomSampler","(","train_dataset",")","if","args",".","local_rank","==","-","1","else","DistributedSampler","(","train_dataset",")","train_dataloader","=","DataLoader","(","train_dataset",",","sampler","=","train_sampler",",","batch_size","=","args",".","train_batch_size",",","collate_fn","=","xlnet_collate_fn","if","args",".","model_type","in","[","'xlnet'","]","else","collate_fn",")","if","args",".","max_steps",">","0",":","t_total","=","args",".","max_steps","args",".","num_train_epochs","=","args",".","max_steps","\/\/","(","len","(","train_dataloader",")","\/\/","args",".","gradient_accumulation_steps",")","+","1","else",":","t_total","=","len","(","train_dataloader",")","\/\/","args",".","gradient_accumulation_steps","*","args",".","num_train_epochs","args",".","warmup_steps","=","int","(","t_total","*","args",".","warmup_proportion",")","# Prepare optimizer and schedule (linear warmup and decay)","no_decay","=","[","'bias'",",","'LayerNorm.weight'","]","optimizer_grouped_parameters","=","[","{","'params'",":","[","p","for","n",",","p","in","model",".","named_parameters","(",")","if","not","any","(","nd","in","n","for","nd","in","no_decay",")","]",",","'weight_decay'",":","args",".","weight_decay","}",",","{","'params'",":","[","p","for","n",",","p","in","model",".","named_parameters","(",")","if","any","(","nd","in","n","for","nd","in","no_decay",")","]",",","'weight_decay'",":","0.0","}","]","optimizer","=","AdamW","(","optimizer_grouped_parameters",",","lr","=","args",".","learning_rate",",","eps","=","args",".","adam_epsilon",")","scheduler","=","WarmupLinearSchedule","(","optimizer",",","warmup_steps","=","args",".","warmup_steps",",","t_total","=","t_total",")","if","args",".","fp16",":","try",":","from","apex","import","amp","except","ImportError",":","raise","ImportError","(","\"Please install apex from https:\/\/www.github.com\/nvidia\/apex to use fp16 training.\"",")","model",",","optimizer","=","amp",".","initialize","(","model",",","optimizer",",","opt_level","=","args",".","fp16_opt_level",")","# multi-gpu training (should be after apex fp16 initialization)","if","args",".","n_gpu",">","1",":","model","=","torch",".","nn",".","DataParallel","(","model",")","# Distributed training (should be after apex fp16 initialization)","if","args",".","local_rank","!=","-","1",":","model","=","torch",".","nn",".","parallel",".","DistributedDataParallel","(","model",",","device_ids","=","[","args",".","local_rank","]",",","output_device","=","args",".","local_rank",",","find_unused_parameters","=","True",")","# Train!","logger",".","info","(","\"***** Running training *****\"",")","logger",".","info","(","\" Num examples = %d\"",",","len","(","train_dataset",")",")","logger",".","info","(","\" Num Epochs = %d\"",",","args",".","num_train_epochs",")","logger",".","info","(","\" Instantaneous batch size per GPU = %d\"",",","args",".","per_gpu_train_batch_size",")","logger",".","info","(","\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\"",",","args",".","train_batch_size","*","args",".","gradient_accumulation_steps","*","(","torch",".","distributed",".","get_world_size","(",")","if","args",".","local_rank","!=","-","1","else","1",")",")","logger",".","info","(","\" Gradient Accumulation steps = %d\"",",","args",".","gradient_accumulation_steps",")","logger",".","info","(","\" Total optimization steps = %d\"",",","t_total",")","global_step","=","0","tr_loss",",","logging_loss","=","0.0",",","0.0","model",".","zero_grad","(",")","seed_everything","(","args",".","seed",")","# Added here for reproductibility (even between python 2 and 3)","for","_","in","range","(","int","(","args",".","num_train_epochs",")",")",":","pbar","=","ProgressBar","(","n_total","=","len","(","train_dataloader",")",",","desc","=","'Training'",")","for","step",",","batch","in","enumerate","(","train_dataloader",")",":","model",".","train","(",")","batch","=","tuple","(","t",".","to","(","args",".","device",")","for","t","in","batch",")","inputs","=","{","'input_ids'",":","batch","[","0","]",",","'attention_mask'",":","batch","[","1","]",",","'labels'",":","batch","[","3","]","}","if","args",".","model_type","!=","'distilbert'",":","inputs","[","'token_type_ids'","]","=","batch","[","2","]","if","args",".","model_type","in","[","'bert'",",","'xlnet'",",","'albert'",",","'roberta'","]","else","None","# XLM, DistilBERT don't use segment_ids","outputs","=","model","(","*","*","inputs",")","loss","=","outputs","[","0","]","# model outputs are always tuple in transformers (see doc)","if","args",".","n_gpu",">","1",":","loss","=","loss",".","mean","(",")","# mean() to average on multi-gpu parallel training","if","args",".","gradient_accumulation_steps",">","1",":","loss","=","loss","\/","args",".","gradient_accumulation_steps","if","args",".","fp16",":","with","amp",".","scale_loss","(","loss",",","optimizer",")","as","scaled_loss",":","scaled_loss",".","backward","(",")","torch",".","nn",".","utils",".","clip_grad_norm_","(","amp",".","master_params","(","optimizer",")",",","args",".","max_grad_norm",")","else",":","loss",".","backward","(",")","torch",".","nn",".","utils",".","clip_grad_norm_","(","model",".","parameters","(",")",",","args",".","max_grad_norm",")","pbar","(","step",",","{","'loss'",":","loss",".","item","(",")","}",")","tr_loss","+=","loss",".","item","(",")","if","(","step","+","1",")","%","args",".","gradient_accumulation_steps","==","0",":","optimizer",".","step","(",")","scheduler",".","step","(",")","# Update learning rate schedule","model",".","zero_grad","(",")","global_step","+=","1","if","args",".","local_rank","in","[","-","1",",","0","]","and","args",".","logging_steps",">","0","and","global_step","%","args",".","logging_steps","==","0",":","print","(","\" \"",")","# Log metrics","if","args",".","local_rank","==","-","1",":","# Only evaluate when single GPU otherwise metrics may not average well","evaluate","(","args",",","model",",","tokenizer",")","if","args",".","local_rank","in","[","-","1",",","0","]","and","args",".","save_steps",">","0","and","global_step","%","args",".","save_steps","==","0",":","# Save model checkpoint","output_dir","=","os",".","path",".","join","(","args",".","output_dir",",","'checkpoint-{}'",".","format","(","global_step",")",")","if","not","os",".","path",".","exists","(","output_dir",")",":","os",".","makedirs","(","output_dir",")","model_to_save","=","model",".","module","if","hasattr","(","model",",","'module'",")","else","model","# Take care of 
distributed\/parallel training","model_to_save",".","save_pretrained","(","output_dir",")","torch",".","save","(","args",",","os",".","path",".","join","(","output_dir",",","'training_args.bin'",")",")","logger",".","info","(","\"Saving model checkpoint to %s\"",",","output_dir",")","tokenizer",".","save_vocabulary","(","vocab_path","=","output_dir",")","print","(","\" \"",")","if","'cuda'","in","str","(","args",".","device",")",":","torch",".","cuda",".","empty_cache","(",")","return","global_step",",","tr_loss","\/","global_step"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/run_classifier.py#L48-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/convert_ernie_original_pad_checkpoint_to_pytorch.py","language":"python","identifier":"build_params_map","parameters":"(attention_num=12)","argument_list":"","return_statement":"return weight_map","docstring":"build params map from paddle-paddle's ERNIE to transformer's BERT\n :return:","docstring_summary":"build params map from paddle-paddle's ERNIE to transformer's BERT\n :return:","docstring_tokens":["build","params","map","from","paddle","-","paddle","s","ERNIE","to","transformer","s","BERT",":","return",":"],"function":"def build_params_map(attention_num=12):\n \"\"\"\n build params map from paddle-paddle's ERNIE to transformer's BERT\n :return:\n \"\"\"\n weight_map = collections.OrderedDict({\n 'word_emb.weight': \"bert.embeddings.word_embeddings.weight\",\n 'pos_emb.weight': \"bert.embeddings.position_embeddings.weight\",\n 'sent_emb.weight': \"bert.embeddings.token_type_embeddings.weight\",\n 'ln.weight': 'bert.embeddings.LayerNorm.gamma',\n 'ln.bias': 'bert.embeddings.LayerNorm.beta',\n })\n # add attention layers\n for i in range(attention_num):\n weight_map[f'encoder_stack.block.{i}.attn.q.weight'] = f'bert.encoder.layer.{i}.attention.self.query.weight'\n weight_map[f'encoder_stack.block.{i}.attn.q.bias'] = f'bert.encoder.layer.{i}.attention.self.query.bias'\n weight_map[f'encoder_stack.block.{i}.attn.k.weight'] = f'bert.encoder.layer.{i}.attention.self.key.weight'\n weight_map[f'encoder_stack.block.{i}.attn.k.bias'] = f'bert.encoder.layer.{i}.attention.self.key.bias'\n weight_map[f'encoder_stack.block.{i}.attn.v.weight'] = f'bert.encoder.layer.{i}.attention.self.value.weight'\n weight_map[f'encoder_stack.block.{i}.attn.v.bias'] = f'bert.encoder.layer.{i}.attention.self.value.bias'\n weight_map[f'encoder_stack.block.{i}.attn.o.weight'] = f'bert.encoder.layer.{i}.attention.output.dense.weight'\n weight_map[f'encoder_stack.block.{i}.attn.o.bias'] = f'bert.encoder.layer.{i}.attention.output.dense.bias'\n weight_map[f'encoder_stack.block.{i}.ln1.weight'] = f'bert.encoder.layer.{i}.attention.output.LayerNorm.gamma'\n weight_map[f'encoder_stack.block.{i}.ln1.bias'] = f'bert.encoder.layer.{i}.attention.output.LayerNorm.beta'\n weight_map[f'encoder_stack.block.{i}.ffn.i.weight'] = f'bert.encoder.layer.{i}.intermediate.dense.weight'\n weight_map[f'encoder_stack.block.{i}.ffn.i.bias'] = f'bert.encoder.layer.{i}.intermediate.dense.bias'\n weight_map[f'encoder_stack.block.{i}.ffn.o.weight'] = f'bert.encoder.layer.{i}.output.dense.weight'\n weight_map[f'encoder_stack.block.{i}.ffn.o.bias'] = f'bert.encoder.layer.{i}.output.dense.bias'\n weight_map[f'encoder_stack.block.{i}.ln2.weight'] = f'bert.encoder.layer.{i}.output.LayerNorm.gamma'\n 
weight_map[f'encoder_stack.block.{i}.ln2.bias'] = f'bert.encoder.layer.{i}.output.LayerNorm.beta'\n # add pooler\n weight_map.update(\n {\n 'pooler.weight': 'bert.pooler.dense.weight',\n 'pooler.bias': 'bert.pooler.dense.bias',\n 'mlm.weight': 'cls.predictions.transform.dense.weight',\n 'mlm.bias': 'cls.predictions.transform.dense.bias',\n 'mlm_ln.weight': 'cls.predictions.transform.LayerNorm.gamma',\n 'mlm_ln.bias': 'cls.predictions.transform.LayerNorm.beta',\n 'mlm_bias': 'cls.predictions.bias'\n }\n )\n return weight_map","function_tokens":["def","build_params_map","(","attention_num","=","12",")",":","weight_map","=","collections",".","OrderedDict","(","{","'word_emb.weight'",":","\"bert.embeddings.word_embeddings.weight\"",",","'pos_emb.weight'",":","\"bert.embeddings.position_embeddings.weight\"",",","'sent_emb.weight'",":","\"bert.embeddings.token_type_embeddings.weight\"",",","'ln.weight'",":","'bert.embeddings.LayerNorm.gamma'",",","'ln.bias'",":","'bert.embeddings.LayerNorm.beta'",",","}",")","# add attention layers","for","i","in","range","(","attention_num",")",":","weight_map","[","f'encoder_stack.block.{i}.attn.q.weight'","]","=","f'bert.encoder.layer.{i}.attention.self.query.weight'","weight_map","[","f'encoder_stack.block.{i}.attn.q.bias'","]","=","f'bert.encoder.layer.{i}.attention.self.query.bias'","weight_map","[","f'encoder_stack.block.{i}.attn.k.weight'","]","=","f'bert.encoder.layer.{i}.attention.self.key.weight'","weight_map","[","f'encoder_stack.block.{i}.attn.k.bias'","]","=","f'bert.encoder.layer.{i}.attention.self.key.bias'","weight_map","[","f'encoder_stack.block.{i}.attn.v.weight'","]","=","f'bert.encoder.layer.{i}.attention.self.value.weight'","weight_map","[","f'encoder_stack.block.{i}.attn.v.bias'","]","=","f'bert.encoder.layer.{i}.attention.self.value.bias'","weight_map","[","f'encoder_stack.block.{i}.attn.o.weight'","]","=","f'bert.encoder.layer.{i}.attention.output.dense.weight'","weight_map","[","f'encoder_stack.block.{i}.attn.o.bias'","]","=","f'bert.encoder.layer.{i}.attention.output.dense.bias'","weight_map","[","f'encoder_stack.block.{i}.ln1.weight'","]","=","f'bert.encoder.layer.{i}.attention.output.LayerNorm.gamma'","weight_map","[","f'encoder_stack.block.{i}.ln1.bias'","]","=","f'bert.encoder.layer.{i}.attention.output.LayerNorm.beta'","weight_map","[","f'encoder_stack.block.{i}.ffn.i.weight'","]","=","f'bert.encoder.layer.{i}.intermediate.dense.weight'","weight_map","[","f'encoder_stack.block.{i}.ffn.i.bias'","]","=","f'bert.encoder.layer.{i}.intermediate.dense.bias'","weight_map","[","f'encoder_stack.block.{i}.ffn.o.weight'","]","=","f'bert.encoder.layer.{i}.output.dense.weight'","weight_map","[","f'encoder_stack.block.{i}.ffn.o.bias'","]","=","f'bert.encoder.layer.{i}.output.dense.bias'","weight_map","[","f'encoder_stack.block.{i}.ln2.weight'","]","=","f'bert.encoder.layer.{i}.output.LayerNorm.gamma'","weight_map","[","f'encoder_stack.block.{i}.ln2.bias'","]","=","f'bert.encoder.layer.{i}.output.LayerNorm.beta'","# add 
pooler","weight_map",".","update","(","{","'pooler.weight'",":","'bert.pooler.dense.weight'",",","'pooler.bias'",":","'bert.pooler.dense.bias'",",","'mlm.weight'",":","'cls.predictions.transform.dense.weight'",",","'mlm.bias'",":","'cls.predictions.transform.dense.bias'",",","'mlm_ln.weight'",":","'cls.predictions.transform.LayerNorm.gamma'",",","'mlm_ln.bias'",":","'cls.predictions.transform.LayerNorm.beta'",",","'mlm_bias'",":","'cls.predictions.bias'","}",")","return","weight_map"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/convert_ernie_original_pad_checkpoint_to_pytorch.py#L24-L66"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"load_vocab","parameters":"(vocab_file)","argument_list":"","return_statement":"return vocab","docstring":"Loads a vocabulary file into a dictionary.","docstring_summary":"Loads a vocabulary file into a dictionary.","docstring_tokens":["Loads","a","vocabulary","file","into","a","dictionary","."],"function":"def load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\n tokens = reader.readlines()\n for index, token in enumerate(tokens):\n token = token.rstrip('\\n')\n vocab[token] = index\n return vocab","function_tokens":["def","load_vocab","(","vocab_file",")",":","vocab","=","collections",".","OrderedDict","(",")","with","open","(","vocab_file",",","\"r\"",",","encoding","=","\"utf-8\"",")","as","reader",":","tokens","=","reader",".","readlines","(",")","for","index",",","token","in","enumerate","(","tokens",")",":","token","=","token",".","rstrip","(","'\\n'",")","vocab","[","token","]","=","index","return","vocab"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L89-L97"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"whitespace_tokenize","parameters":"(text)","argument_list":"","return_statement":"return tokens","docstring":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_summary":"Runs basic whitespace cleaning and splitting on a piece of text.","docstring_tokens":["Runs","basic","whitespace","cleaning","and","splitting","on","a","piece","of","text","."],"function":"def whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens","function_tokens":["def","whitespace_tokenize","(","text",")",":","text","=","text",".","strip","(",")","if","not","text",":","return","[","]","tokens","=","text",".","split","(",")","return","tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L100-L106"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"_is_whitespace","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a whitespace character.","docstring_summary":"Checks whether `chars` is a whitespace character.","docstring_tokens":["Checks","whether","chars","is","a","whitespace","character","."],"function":"def _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False","function_tokens":["def","_is_whitespace","(","char",")",":","# \\t, \\n, and \\r are technically contorl characters but we treat them","# as whitespace since they are generally considered as such.","if","char","==","\" \"","or","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Zs\"",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L465-L474"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"_is_control","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a control character.","docstring_summary":"Checks whether `chars` is a control character.","docstring_tokens":["Checks","whether","chars","is","a","control","character","."],"function":"def _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False","function_tokens":["def","_is_control","(","char",")",":","# These are technically control characters but we count them as whitespace","# characters.","if","char","==","\"\\t\"","or","char","==","\"\\n\"","or","char","==","\"\\r\"",":","return","False","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"C\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L477-L486"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"_is_punctuation","parameters":"(char)","argument_list":"","return_statement":"return False","docstring":"Checks whether `chars` is a punctuation character.","docstring_summary":"Checks whether `chars` is a punctuation character.","docstring_tokens":["Checks","whether","chars","is","a","punctuation","character","."],"function":"def 
_is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter\/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False","function_tokens":["def","_is_punctuation","(","char",")",":","cp","=","ord","(","char",")","# We treat all non-letter\/number ASCII as punctuation.","# Characters such as \"^\", \"$\", and \"`\" are not in the Unicode","# Punctuation class but we treat them as punctuation anyways, for","# consistency.","if","(","(","cp",">=","33","and","cp","<=","47",")","or","(","cp",">=","58","and","cp","<=","64",")","or","(","cp",">=","91","and","cp","<=","96",")","or","(","cp",">=","123","and","cp","<=","126",")",")",":","return","True","cat","=","unicodedata",".","category","(","char",")","if","cat",".","startswith","(","\"P\"",")",":","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L489-L502"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BertTokenizer.__init__","parameters":"(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,\n unk_token=\"[UNK]\", sep_token=\"[SEP]\", pad_token=\"[PAD]\", cls_token=\"[CLS]\",\n mask_token=\"[MASK]\", tokenize_chinese_chars=True, **kwargs)","argument_list":"","return_statement":"","docstring":"Constructs a BertTokenizer.\n\n Args:\n **vocab_file**: Path to a one-wordpiece-per-line vocabulary file\n **do_lower_case**: (`optional`) boolean (default True)\n Whether to lower case the input\n Only has an effect when do_basic_tokenize=True\n **do_basic_tokenize**: (`optional`) boolean (default True)\n Whether to do basic tokenization before wordpiece.\n **never_split**: (`optional`) list of string\n List of tokens which will never be split during tokenization.\n Only has an effect when do_basic_tokenize=True\n **tokenize_chinese_chars**: (`optional`) boolean (default True)\n Whether to tokenize Chinese characters.\n This should likely be deactivated for Japanese:\n see: https:\/\/github.com\/huggingface\/pytorch-pretrained-BERT\/issues\/328","docstring_summary":"Constructs a BertTokenizer.","docstring_tokens":["Constructs","a","BertTokenizer","."],"function":"def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,\n unk_token=\"[UNK]\", sep_token=\"[SEP]\", pad_token=\"[PAD]\", cls_token=\"[CLS]\",\n mask_token=\"[MASK]\", tokenize_chinese_chars=True, **kwargs):\n \"\"\"Constructs a BertTokenizer.\n\n Args:\n **vocab_file**: Path to a one-wordpiece-per-line vocabulary file\n **do_lower_case**: (`optional`) boolean (default True)\n Whether to lower case the input\n Only has an effect when do_basic_tokenize=True\n **do_basic_tokenize**: (`optional`) boolean (default True)\n Whether to do basic tokenization before wordpiece.\n **never_split**: (`optional`) list of string\n List of tokens which will never be split during tokenization.\n Only has an effect when 
do_basic_tokenize=True\n **tokenize_chinese_chars**: (`optional`) boolean (default True)\n Whether to tokenize Chinese characters.\n This should likely be deactivated for Japanese:\n see: https:\/\/github.com\/huggingface\/pytorch-pretrained-BERT\/issues\/328\n \"\"\"\n super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token,\n pad_token=pad_token, cls_token=cls_token,\n mask_token=mask_token, **kwargs)\n self.max_len_single_sentence = self.max_len - 2 # take into account special tokens\n self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\n \"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained \"\n \"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(vocab_file))\n self.vocab = load_vocab(vocab_file)\n self.ids_to_tokens = collections.OrderedDict(\n [(ids, tok) for tok, ids in self.vocab.items()])\n self.do_basic_tokenize = do_basic_tokenize\n if do_basic_tokenize:\n self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,\n never_split=never_split,\n tokenize_chinese_chars=tokenize_chinese_chars)\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)","function_tokens":["def","__init__","(","self",",","vocab_file",",","do_lower_case","=","True",",","do_basic_tokenize","=","True",",","never_split","=","None",",","unk_token","=","\"[UNK]\"",",","sep_token","=","\"[SEP]\"",",","pad_token","=","\"[PAD]\"",",","cls_token","=","\"[CLS]\"",",","mask_token","=","\"[MASK]\"",",","tokenize_chinese_chars","=","True",",","*","*","kwargs",")",":","super","(","BertTokenizer",",","self",")",".","__init__","(","unk_token","=","unk_token",",","sep_token","=","sep_token",",","pad_token","=","pad_token",",","cls_token","=","cls_token",",","mask_token","=","mask_token",",","*","*","kwargs",")","self",".","max_len_single_sentence","=","self",".","max_len","-","2","# take into account special tokens","self",".","max_len_sentences_pair","=","self",".","max_len","-","3","# take into account special tokens","if","not","os",".","path",".","isfile","(","vocab_file",")",":","raise","ValueError","(","\"Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained \"","\"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`\"",".","format","(","vocab_file",")",")","self",".","vocab","=","load_vocab","(","vocab_file",")","self",".","ids_to_tokens","=","collections",".","OrderedDict","(","[","(","ids",",","tok",")","for","tok",",","ids","in","self",".","vocab",".","items","(",")","]",")","self",".","do_basic_tokenize","=","do_basic_tokenize","if","do_basic_tokenize",":","self",".","basic_tokenizer","=","BasicTokenizer","(","do_lower_case","=","do_lower_case",",","never_split","=","never_split",",","tokenize_chinese_chars","=","tokenize_chinese_chars",")","self",".","wordpiece_tokenizer","=","WordpieceTokenizer","(","vocab","=","self",".","vocab",",","unk_token","=","self",".","unk_token",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L129-L167"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BertTokenizer._convert_token_to_id","parameters":"(self, token)","argument_list":"","return_statement":"return self.vocab.get(token, self.vocab.get(self.unk_token))","docstring":"Converts a token (str\/unicode) in an id using the vocab.","docstring_summary":"Converts a token (str\/unicode) in an id using the vocab.","docstring_tokens":["Converts","a","token","(","str","\/","unicode",")","in","an","id","using","the","vocab","."],"function":"def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str\/unicode) in an id using the vocab. \"\"\"\n return self.vocab.get(token, self.vocab.get(self.unk_token))","function_tokens":["def","_convert_token_to_id","(","self",",","token",")",":","return","self",".","vocab",".","get","(","token",",","self",".","vocab",".","get","(","self",".","unk_token",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L183-L185"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BertTokenizer._convert_id_to_token","parameters":"(self, index)","argument_list":"","return_statement":"return self.ids_to_tokens.get(index, self.unk_token)","docstring":"Converts an index (integer) in a token (string\/unicode) using the vocab.","docstring_summary":"Converts an index (integer) in a token (string\/unicode) using the vocab.","docstring_tokens":["Converts","an","index","(","integer",")","in","a","token","(","string","\/","unicode",")","using","the","vocab","."],"function":"def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (string\/unicode) using the vocab.\"\"\"\n return self.ids_to_tokens.get(index, self.unk_token)","function_tokens":["def","_convert_id_to_token","(","self",",","index",")",":","return","self",".","ids_to_tokens",".","get","(","index",",","self",".","unk_token",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L187-L189"} 
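Taken together, the BertTokenizer records around here describe two small mechanics: vocabulary lookups that fall back to [UNK] (_convert_token_to_id / _convert_id_to_token) and the [CLS] X [SEP] and [CLS] A [SEP] B [SEP] assembly documented by build_inputs_with_special_tokens just below. A minimal sketch, assuming a hypothetical toy vocabulary:

import collections

# Hypothetical vocabulary; a real one comes from load_vocab on a vocab file.
vocab = collections.OrderedDict(
    [("[PAD]", 0), ("[UNK]", 1), ("[CLS]", 2), ("[SEP]", 3),
     ("hello", 4), ("world", 5)])
ids_to_tokens = collections.OrderedDict((i, t) for t, i in vocab.items())

def token_to_id(token):
    # Mirrors _convert_token_to_id: unknown tokens map to the [UNK] id.
    return vocab.get(token, vocab["[UNK]"])

def build_inputs(ids_a, ids_b=None):
    # Mirrors build_inputs_with_special_tokens:
    #   single sequence: [CLS] X [SEP]; pair: [CLS] A [SEP] B [SEP]
    cls, sep = [vocab["[CLS]"]], [vocab["[SEP]"]]
    if ids_b is None:
        return cls + ids_a + sep
    return cls + ids_a + sep + ids_b + sep

ids = [token_to_id(t) for t in ["hello", "unseen", "world"]]
print(ids)                                        # -> [4, 1, 5]
print(build_inputs(ids))                          # -> [2, 4, 1, 5, 3]
print([ids_to_tokens[i] for i in build_inputs(ids)])
# -> ['[CLS]', 'hello', '[UNK]', 'world', '[SEP]']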
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BertTokenizer.convert_tokens_to_string","parameters":"(self, tokens)","argument_list":"","return_statement":"return out_string","docstring":"Converts a sequence of tokens (string) in a single string.","docstring_summary":"Converts a sequence of tokens (string) in a single string.","docstring_tokens":["Converts","a","sequence","of","tokens","(","string",")","in","a","single","string","."],"function":"def convert_tokens_to_string(self, tokens):\n \"\"\" Converts a sequence of tokens (string) in a single string. \"\"\"\n out_string = ' '.join(tokens).replace(' ##', '').strip()\n return out_string","function_tokens":["def","convert_tokens_to_string","(","self",",","tokens",")",":","out_string","=","' '",".","join","(","tokens",")",".","replace","(","' ##'",",","''",")",".","strip","(",")","return","out_string"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L191-L194"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BertTokenizer.build_inputs_with_special_tokens","parameters":"(self, token_ids_0, token_ids_1=None)","argument_list":"","return_statement":"return cls + token_ids_0 + sep + token_ids_1 + sep","docstring":"Build model inputs from a sequence or a pair of sequence for sequence classification tasks\n by concatenating and adding special tokens.\n A BERT sequence has the following format:\n single sequence: [CLS] X [SEP]\n pair of sequences: [CLS] A [SEP] B [SEP]","docstring_summary":"Build model inputs from a sequence or a pair of sequence for sequence classification tasks\n by concatenating and adding special tokens.\n A BERT sequence has the following format:\n single sequence: [CLS] X [SEP]\n pair of sequences: [CLS] A [SEP] B [SEP]","docstring_tokens":["Build","model","inputs","from","a","sequence","or","a","pair","of","sequence","for","sequence","classification","tasks","by","concatenating","and","adding","special","tokens",".","A","BERT","sequence","has","the","following","format",":","single","sequence",":","[","CLS","]","X","[","SEP","]","pair","of","sequences",":","[","CLS","]","A","[","SEP","]","B","[","SEP","]"],"function":"def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks\n by concatenating and adding special tokens.\n A BERT sequence has the following format:\n single sequence: [CLS] X [SEP]\n pair of sequences: [CLS] A [SEP] B [SEP]\n \"\"\"\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + 
sep","function_tokens":["def","build_inputs_with_special_tokens","(","self",",","token_ids_0",",","token_ids_1","=","None",")",":","if","token_ids_1","is","None",":","return","[","self",".","cls_token_id","]","+","token_ids_0","+","[","self",".","sep_token_id","]","cls","=","[","self",".","cls_token_id","]","sep","=","[","self",".","sep_token_id","]","return","cls","+","token_ids_0","+","sep","+","token_ids_1","+","sep"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L196-L208"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BertTokenizer.get_special_tokens_mask","parameters":"(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False)","argument_list":"","return_statement":"return [1] + ([0] * len(token_ids_0)) + [1]","docstring":"Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.\n\n Args:\n token_ids_0: list of ids (must not contain special tokens)\n token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids\n for sequence pairs\n already_has_special_tokens: (default False) Set to True if the token list is already formated with\n special tokens for the model\n\n Returns:\n A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token.","docstring_summary":"Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.","docstring_tokens":["Retrieves","sequence","ids","from","a","token","list","that","has","no","special","tokens","added",".","This","method","is","called","when","adding","special","tokens","using","the","tokenizer","prepare_for_model","or","encode_plus","methods","."],"function":"def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):\n \"\"\"\n Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.\n\n Args:\n token_ids_0: list of ids (must not contain special tokens)\n token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids\n for sequence pairs\n already_has_special_tokens: (default False) Set to True if the token list is already formated with\n special tokens for the model\n\n Returns:\n A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token.\n \"\"\"\n\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError(\"You should not supply a second sequence if the provided sequence of \"\n \"ids is already formated with special tokens for the model.\")\n return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))\n\n if token_ids_1 is not None:\n return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]\n return [1] + ([0] * len(token_ids_0)) + [1]","function_tokens":["def","get_special_tokens_mask","(","self",",","token_ids_0",",","token_ids_1","=","None",",","already_has_special_tokens","=","False",")",":","if","already_has_special_tokens",":","if","token_ids_1","is","not","None",":","raise","ValueError","(","\"You should not supply a second sequence if the provided sequence of \"","\"ids is already formated with special tokens for the model.\"",")","return","list","(","map","(","lambda","x",":","1","if","x","in","[","self",".","sep_token_id",",","self",".","cls_token_id","]","else","0",",","token_ids_0",")",")","if","token_ids_1","is","not","None",":","return","[","1","]","+","(","[","0","]","*","len","(","token_ids_0",")",")","+","[","1","]","+","(","[","0","]","*","len","(","token_ids_1",")",")","+","[","1","]","return","[","1","]","+","(","[","0","]","*","len","(","token_ids_0",")",")","+","[","1","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L210-L234"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BertTokenizer.create_token_type_ids_from_sequences","parameters":"(self, token_ids_0, token_ids_1=None)","argument_list":"","return_statement":"return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]","docstring":"Creates a mask from the two sequences passed to be used in a sequence-pair classification task.\n A BERT sequence pair mask has the following format:\n 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence\n\n if token_ids_1 is None, only returns the first portion of the mask (0's).","docstring_summary":"Creates a mask from the two sequences passed to be used in a sequence-pair classification task.\n A BERT sequence pair mask has the following format:\n 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence","docstring_tokens":["Creates","a","mask","from","the","two","sequences","passed","to","be","used","in","a","sequence","-","pair","classification","task",".","A","BERT","sequence","pair","mask","has","the","following","format",":","0","0","0","0","0","0","0","0","0","0","1","1","1","1","1","1","1","1","1","1","1","|","first","sequence","|","second","sequence"],"function":"def create_token_type_ids_from_sequences(self, token_ids_0, 
token_ids_1=None):\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task.\n A BERT sequence pair mask has the following format:\n 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence\n\n if token_ids_1 is None, only returns the first portion of the mask (0's).\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]","function_tokens":["def","create_token_type_ids_from_sequences","(","self",",","token_ids_0",",","token_ids_1","=","None",")",":","sep","=","[","self",".","sep_token_id","]","cls","=","[","self",".","cls_token_id","]","if","token_ids_1","is","None",":","return","len","(","cls","+","token_ids_0","+","sep",")","*","[","0","]","return","len","(","cls","+","token_ids_0","+","sep",")","*","[","0","]","+","len","(","token_ids_1","+","sep",")","*","[","1","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L236-L249"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BertTokenizer.save_vocabulary","parameters":"(self, vocab_path)","argument_list":"","return_statement":"return (vocab_file,)","docstring":"Save the tokenizer vocabulary to a directory or file.","docstring_summary":"Save the tokenizer vocabulary to a directory or file.","docstring_tokens":["Save","the","tokenizer","vocabulary","to","a","directory","or","file","."],"function":"def save_vocabulary(self, vocab_path):\n \"\"\"Save the tokenizer vocabulary to a directory or file.\"\"\"\n index = 0\n if os.path.isdir(vocab_path):\n vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])\n else:\n vocab_file = vocab_path\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(vocab_file))\n index = token_index\n writer.write(token + u'\\n')\n index += 1\n return (vocab_file,)","function_tokens":["def","save_vocabulary","(","self",",","vocab_path",")",":","index","=","0","if","os",".","path",".","isdir","(","vocab_path",")",":","vocab_file","=","os",".","path",".","join","(","vocab_path",",","VOCAB_FILES_NAMES","[","'vocab_file'","]",")","else",":","vocab_file","=","vocab_path","with","open","(","vocab_file",",","\"w\"",",","encoding","=","\"utf-8\"",")","as","writer",":","for","token",",","token_index","in","sorted","(","self",".","vocab",".","items","(",")",",","key","=","lambda","kv",":","kv","[","1","]",")",":","if","index","!=","token_index",":","logger",".","warning","(","\"Saving vocabulary to {}: vocabulary indices are not consecutive.\"","\" Please check that the vocabulary is not 
corrupted!\"",".","format","(","vocab_file",")",")","index","=","token_index","writer",".","write","(","token","+","u'\\n'",")","index","+=","1","return","(","vocab_file",",",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L251-L266"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BasicTokenizer.__init__","parameters":"(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True)","argument_list":"","return_statement":"","docstring":"Constructs a BasicTokenizer.\n\n Args:\n **do_lower_case**: Whether to lower case the input.\n **never_split**: (`optional`) list of str\n Kept for backward compatibility purposes.\n Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)\n List of token not to split.\n **tokenize_chinese_chars**: (`optional`) boolean (default True)\n Whether to tokenize Chinese characters.\n This should likely be deactivated for Japanese:\n see: https:\/\/github.com\/huggingface\/pytorch-pretrained-BERT\/issues\/328","docstring_summary":"Constructs a BasicTokenizer.","docstring_tokens":["Constructs","a","BasicTokenizer","."],"function":"def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):\n \"\"\" Constructs a BasicTokenizer.\n\n Args:\n **do_lower_case**: Whether to lower case the input.\n **never_split**: (`optional`) list of str\n Kept for backward compatibility purposes.\n Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)\n List of token not to split.\n **tokenize_chinese_chars**: (`optional`) boolean (default True)\n Whether to tokenize Chinese characters.\n This should likely be deactivated for Japanese:\n see: https:\/\/github.com\/huggingface\/pytorch-pretrained-BERT\/issues\/328\n \"\"\"\n if never_split is None:\n never_split = []\n self.do_lower_case = do_lower_case\n self.never_split = never_split\n self.tokenize_chinese_chars = tokenize_chinese_chars","function_tokens":["def","__init__","(","self",",","do_lower_case","=","True",",","never_split","=","None",",","tokenize_chinese_chars","=","True",")",":","if","never_split","is","None",":","never_split","=","[","]","self",".","do_lower_case","=","do_lower_case","self",".","never_split","=","never_split","self",".","tokenize_chinese_chars","=","tokenize_chinese_chars"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L272-L290"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BasicTokenizer.tokenize","parameters":"(self, text, never_split=None)","argument_list":"","return_statement":"return output_tokens","docstring":"Basic Tokenization of a piece of text.\n Split on \"white spaces\" only, for sub-word tokenization, see WordPieceTokenizer.\n\n Args:\n **never_split**: (`optional`) list of str\n Kept for backward compatibility purposes.\n Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)\n List of token not to split.","docstring_summary":"Basic Tokenization of a piece of text.\n Split 
on \"white spaces\" only, for sub-word tokenization, see WordPieceTokenizer.","docstring_tokens":["Basic","Tokenization","of","a","piece","of","text",".","Split","on","white","spaces","only","for","sub","-","word","tokenization","see","WordPieceTokenizer","."],"function":"def tokenize(self, text, never_split=None):\n \"\"\" Basic Tokenization of a piece of text.\n Split on \"white spaces\" only, for sub-word tokenization, see WordPieceTokenizer.\n\n Args:\n **never_split**: (`optional`) list of str\n Kept for backward compatibility purposes.\n Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)\n List of token not to split.\n \"\"\"\n never_split = self.never_split + (never_split if never_split is not None else [])\n text = self._clean_text(text)\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case and token not in never_split:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",",","never_split","=","None",")",":","never_split","=","self",".","never_split","+","(","never_split","if","never_split","is","not","None","else","[","]",")","text","=","self",".","_clean_text","(","text",")","# This was added on November 1st, 2018 for the multilingual and Chinese","# models. 
This is also applied to the English models now, but it doesn't","# matter since the English models were not trained on any Chinese data","# and generally don't have any Chinese data in them (there are Chinese","# characters in the vocabulary because Wikipedia does have some Chinese","# words in the English Wikipedia.).","if","self",".","tokenize_chinese_chars",":","text","=","self",".","_tokenize_chinese_chars","(","text",")","orig_tokens","=","whitespace_tokenize","(","text",")","split_tokens","=","[","]","for","token","in","orig_tokens",":","if","self",".","do_lower_case","and","token","not","in","never_split",":","token","=","token",".","lower","(",")","token","=","self",".","_run_strip_accents","(","token",")","split_tokens",".","extend","(","self",".","_run_split_on_punc","(","token",")",")","output_tokens","=","whitespace_tokenize","(","\" \"",".","join","(","split_tokens",")",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L292-L321"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BasicTokenizer._run_strip_accents","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Strips accents from a piece of text.","docstring_summary":"Strips accents from a piece of text.","docstring_tokens":["Strips","accents","from","a","piece","of","text","."],"function":"def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_run_strip_accents","(","self",",","text",")",":","text","=","unicodedata",".","normalize","(","\"NFD\"",",","text",")","output","=","[","]","for","char","in","text",":","cat","=","unicodedata",".","category","(","char",")","if","cat","==","\"Mn\"",":","continue","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L323-L332"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BasicTokenizer._run_split_on_punc","parameters":"(self, text, never_split=None)","argument_list":"","return_statement":"return [\"\".join(x) for x in output]","docstring":"Splits punctuation on a piece of text.","docstring_summary":"Splits punctuation on a piece of text.","docstring_tokens":["Splits","punctuation","on","a","piece","of","text","."],"function":"def _run_split_on_punc(self, text, never_split=None):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n if never_split is not None and text in never_split:\n return [text]\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) 
for x in output]","function_tokens":["def","_run_split_on_punc","(","self",",","text",",","never_split","=","None",")",":","if","never_split","is","not","None","and","text","in","never_split",":","return","[","text","]","chars","=","list","(","text",")","i","=","0","start_new_word","=","True","output","=","[","]","while","i","<","len","(","chars",")",":","char","=","chars","[","i","]","if","_is_punctuation","(","char",")",":","output",".","append","(","[","char","]",")","start_new_word","=","True","else",":","if","start_new_word",":","output",".","append","(","[","]",")","start_new_word","=","False","output","[","-","1","]",".","append","(","char",")","i","+=","1","return","[","\"\"",".","join","(","x",")","for","x","in","output","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L334-L354"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BasicTokenizer._tokenize_chinese_chars","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Adds whitespace around any CJK character.","docstring_summary":"Adds whitespace around any CJK character.","docstring_tokens":["Adds","whitespace","around","any","CJK","character","."],"function":"def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_tokenize_chinese_chars","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","self",".","_is_chinese_char","(","cp",")",":","output",".","append","(","\" \"",")","output",".","append","(","char",")","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L356-L367"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BasicTokenizer._is_chinese_char","parameters":"(self, cp)","argument_list":"","return_statement":"return False","docstring":"Checks whether CP is the codepoint of a CJK character.","docstring_summary":"Checks whether CP is the codepoint of a CJK character.","docstring_tokens":["Checks","whether","CP","is","the","codepoint","of","a","CJK","character","."],"function":"def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False","function_tokens":["def","_is_chinese_char","(","self",",","cp",")",":","# This defines a \"chinese character\" as anything in the CJK Unicode block:","# https:\/\/en.wikipedia.org\/wiki\/CJK_Unified_Ideographs_(Unicode_block)","#","# Note that the CJK Unicode block is NOT all Japanese and Korean characters,","# despite its name. The modern Korean Hangul alphabet is a different block,","# as is Japanese Hiragana and Katakana. Those alphabets are used to write","# space-separated words, so they are not treated specially and handled","# like all of the other languages.","if","(","(","cp",">=","0x4E00","and","cp","<=","0x9FFF",")","or","#","(","cp",">=","0x3400","and","cp","<=","0x4DBF",")","or","#","(","cp",">=","0x20000","and","cp","<=","0x2A6DF",")","or","#","(","cp",">=","0x2A700","and","cp","<=","0x2B73F",")","or","#","(","cp",">=","0x2B740","and","cp","<=","0x2B81F",")","or","#","(","cp",">=","0x2B820","and","cp","<=","0x2CEAF",")","or","(","cp",">=","0xF900","and","cp","<=","0xFAFF",")","or","#","(","cp",">=","0x2F800","and","cp","<=","0x2FA1F",")",")",":","#","return","True","return","False"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L369-L389"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"BasicTokenizer._clean_text","parameters":"(self, text)","argument_list":"","return_statement":"return \"\".join(output)","docstring":"Performs invalid character removal and whitespace cleanup on text.","docstring_summary":"Performs invalid character removal and whitespace cleanup on text.","docstring_tokens":["Performs","invalid","character","removal","and","whitespace","cleanup","on","text","."],"function":"def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)","function_tokens":["def","_clean_text","(","self",",","text",")",":","output","=","[","]","for","char","in","text",":","cp","=","ord","(","char",")","if","cp","==","0","or","cp","==","0xfffd","or","_is_control","(","char",")",":","continue","if","_is_whitespace","(","char",")",":","output",".","append","(","\" \"",")","else",":","output",".","append","(","char",")","return","\"\"",".","join","(","output",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L391-L402"} 
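Taken together, the BasicTokenizer records above form a small pipeline: clean the text, pad CJK codepoints with spaces, split on whitespace, optionally lowercase and strip accents, then split off punctuation. A condensed sketch of that flow, assuming a simplified CJK test (only the main 0x4E00-0x9FFF block rather than the full range list above) and Unicode punctuation categories in place of the repo's `_is_punctuation` helper; all function names here are illustrative:

import unicodedata

def strip_accents(text):
    # NFD-decompose, then drop combining marks (category "Mn"),
    # mirroring BasicTokenizer._run_strip_accents.
    return "".join(c for c in unicodedata.normalize("NFD", text)
                   if unicodedata.category(c) != "Mn")

def pad_cjk(text):
    # Simplified stand-in for _tokenize_chinese_chars: pad only the main
    # CJK Unified Ideographs block with spaces.
    return "".join(" %s " % c if 0x4E00 <= ord(c) <= 0x9FFF else c
                   for c in text)

def split_punct(token):
    # Rough equivalent of _run_split_on_punc; the repo's _is_punctuation
    # also treats some non-letter ASCII (e.g. $, +) as punctuation.
    pieces, word = [], ""
    for c in token:
        if unicodedata.category(c).startswith("P"):
            if word:
                pieces.append(word)
                word = ""
            pieces.append(c)
        else:
            word += c
    if word:
        pieces.append(word)
    return pieces

def basic_tokenize(text, lower=True):
    tokens = []
    for tok in pad_cjk(text).split():
        if lower:
            tok = strip_accents(tok.lower())
        tokens.extend(split_punct(tok))
    return tokens

print(basic_tokenize(u"Héllo, 中国!"))  # ['hello', ',', '中', '国', '!']

Padding CJK characters with spaces before the whitespace split is what makes each Chinese character its own token; the WordpieceTokenizer record that follows then greedily maps those tokens into the vocabulary.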
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py","language":"python","identifier":"WordpieceTokenizer.tokenize","parameters":"(self, text)","argument_list":"","return_statement":"return output_tokens","docstring":"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer`.\n\n Returns:\n A list of wordpiece tokens.","docstring_summary":"Tokenizes a piece of text into its word pieces.","docstring_tokens":["Tokenizes","a","piece","of","text","into","its","word","pieces","."],"function":"def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer`.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens","function_tokens":["def","tokenize","(","self",",","text",")",":","output_tokens","=","[","]","for","token","in","whitespace_tokenize","(","text",")",":","chars","=","list","(","token",")","if","len","(","chars",")",">","self",".","max_input_chars_per_word",":","output_tokens",".","append","(","self",".","unk_token",")","continue","is_bad","=","False","start","=","0","sub_tokens","=","[","]","while","start","<","len","(","chars",")",":","end","=","len","(","chars",")","cur_substr","=","None","while","start","<","end",":","substr","=","\"\"",".","join","(","chars","[","start",":","end","]",")","if","start",">","0",":","substr","=","\"##\"","+","substr","if","substr","in","self",".","vocab",":","cur_substr","=","substr","break","end","-=","1","if","cur_substr","is","None",":","is_bad","=","True","break","sub_tokens",".","append","(","cur_substr",")","start","=","end","if","is_bad",":","output_tokens",".","append","(","self",".","unk_token",")","else",":","output_tokens",".","extend","(","sub_tokens",")","return","output_tokens"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_bert.py#L413-L462"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_ctrl.py","language":"python","identifier":"CTRLConfig.__init__","parameters":"(\n self,\n 
vocab_size_or_config_json_file=246534,\n n_positions=256,\n n_ctx=256,\n n_embd=1280,\n dff=8192,\n n_layer=48,\n n_head=16,\n resid_pdrop=0.1,\n embd_pdrop=0.1,\n attn_pdrop=0.1,\n layer_norm_epsilon=1e-6,\n initializer_range=0.02,\n\n num_labels=1,\n summary_type='cls_index',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n **kwargs\n )","argument_list":"","return_statement":"","docstring":"Constructs CTRLConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `CTRLModel` or a configuration json file.\n n_positions: Number of positional embeddings.\n n_ctx: Size of the causal mask (usually same as n_positions).\n dff: Size of the inner dimension of the FFN.\n n_embd: Dimensionality of the embeddings and hidden states.\n n_layer: Number of hidden layers in the Transformer encoder.\n n_head: Number of attention heads for each attention layer in\n the Transformer encoder.\n layer_norm_epsilon: epsilon to use in the layer norm layers\n resid_pdrop: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attn_pdrop: The dropout ratio for the attention\n probabilities.\n embd_pdrop: The dropout ratio for the embeddings.\n initializer_range: The stddev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs CTRLConfig.","docstring_tokens":["Constructs","CTRLConfig","."],"function":"def __init__(\n self,\n vocab_size_or_config_json_file=246534,\n n_positions=256,\n n_ctx=256,\n n_embd=1280,\n dff=8192,\n n_layer=48,\n n_head=16,\n resid_pdrop=0.1,\n embd_pdrop=0.1,\n attn_pdrop=0.1,\n layer_norm_epsilon=1e-6,\n initializer_range=0.02,\n\n num_labels=1,\n summary_type='cls_index',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n **kwargs\n ):\n \"\"\"Constructs CTRLConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `CTRLModel` or a configuration json file.\n n_positions: Number of positional embeddings.\n n_ctx: Size of the causal mask (usually same as n_positions).\n dff: Size of the inner dimension of the FFN.\n n_embd: Dimensionality of the embeddings and hidden states.\n n_layer: Number of hidden layers in the Transformer encoder.\n n_head: Number of attention heads for each attention layer in\n the Transformer encoder.\n layer_norm_epsilon: epsilon to use in the layer norm layers\n resid_pdrop: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attn_pdrop: The dropout ratio for the attention\n probabilities.\n embd_pdrop: The dropout ratio for the embeddings.\n initializer_range: The stddev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n super(CTRLConfig, self).__init__(**kwargs)\n\n self.vocab_size = vocab_size_or_config_json_file if isinstance(vocab_size_or_config_json_file, int) else -1\n self.n_ctx = n_ctx\n self.n_positions = n_positions\n self.n_embd = n_embd\n self.n_layer = n_layer\n self.n_head = n_head\n self.dff = dff\n self.resid_pdrop = resid_pdrop\n self.embd_pdrop = embd_pdrop\n self.attn_pdrop = attn_pdrop\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n\n self.num_labels = num_labels\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_first_dropout = 
summary_first_dropout\n self.summary_proj_to_labels = summary_proj_to_labels\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding=\"utf-8\") as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif not isinstance(vocab_size_or_config_json_file, int):\n raise ValueError(\n \"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\"\n )","function_tokens":["def","__init__","(","self",",","vocab_size_or_config_json_file","=","246534",",","n_positions","=","256",",","n_ctx","=","256",",","n_embd","=","1280",",","dff","=","8192",",","n_layer","=","48",",","n_head","=","16",",","resid_pdrop","=","0.1",",","embd_pdrop","=","0.1",",","attn_pdrop","=","0.1",",","layer_norm_epsilon","=","1e-6",",","initializer_range","=","0.02",",","num_labels","=","1",",","summary_type","=","'cls_index'",",","summary_use_proj","=","True",",","summary_activation","=","None",",","summary_proj_to_labels","=","True",",","summary_first_dropout","=","0.1",",","*","*","kwargs",")",":","super","(","CTRLConfig",",","self",")",".","__init__","(","*","*","kwargs",")","self",".","vocab_size","=","vocab_size_or_config_json_file","if","isinstance","(","vocab_size_or_config_json_file",",","int",")","else","-","1","self",".","n_ctx","=","n_ctx","self",".","n_positions","=","n_positions","self",".","n_embd","=","n_embd","self",".","n_layer","=","n_layer","self",".","n_head","=","n_head","self",".","dff","=","dff","self",".","resid_pdrop","=","resid_pdrop","self",".","embd_pdrop","=","embd_pdrop","self",".","attn_pdrop","=","attn_pdrop","self",".","layer_norm_epsilon","=","layer_norm_epsilon","self",".","initializer_range","=","initializer_range","self",".","num_labels","=","num_labels","self",".","summary_type","=","summary_type","self",".","summary_use_proj","=","summary_use_proj","self",".","summary_activation","=","summary_activation","self",".","summary_first_dropout","=","summary_first_dropout","self",".","summary_proj_to_labels","=","summary_proj_to_labels","if","isinstance","(","vocab_size_or_config_json_file",",","str",")","or","(","sys",".","version_info","[","0","]","==","2","and","isinstance","(","vocab_size_or_config_json_file",",","unicode",")",")",":","with","open","(","vocab_size_or_config_json_file",",","\"r\"",",","encoding","=","\"utf-8\"",")","as","reader",":","json_config","=","json",".","loads","(","reader",".","read","(",")",")","for","key",",","value","in","json_config",".","items","(",")",":","self",".","__dict__","[","key","]","=","value","elif","not","isinstance","(","vocab_size_or_config_json_file",",","int",")",":","raise","ValueError","(","\"First argument must be either a vocabulary size (int)\"","\"or the path to a pretrained model config file (str)\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_ctrl.py#L53-L127"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/optimization.py","language":"python","identifier":"AdamW.step","parameters":"(self, closure=None)","argument_list":"","return_statement":"return loss","docstring":"Performs a single optimization step.\n\n Arguments:\n closure (callable, 
optional): A closure that reevaluates the model\n and returns the loss.","docstring_summary":"Performs a single optimization step.","docstring_tokens":["Performs","a","single","optimization","step","."],"function":"def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n step_size = group['lr']\n if group['correct_bias']: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state['step']\n bias_correction2 = 1.0 - beta2 ** state['step']\n step_size = step_size * math.sqrt(bias_correction2) \/ bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization\/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m\/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group['weight_decay'] > 0.0:\n p.data.add_(-group['lr'] * group['weight_decay'], p.data)\n\n return loss","function_tokens":["def","step","(","self",",","closure","=","None",")",":","loss","=","None","if","closure","is","not","None",":","loss","=","closure","(",")","for","group","in","self",".","param_groups",":","for","p","in","group","[","'params'","]",":","if","p",".","grad","is","None",":","continue","grad","=","p",".","grad",".","data","if","grad",".","is_sparse",":","raise","RuntimeError","(","'Adam does not support sparse gradients, please consider SparseAdam instead'",")","state","=","self",".","state","[","p","]","# State initialization","if","len","(","state",")","==","0",":","state","[","'step'","]","=","0","# Exponential moving average of gradient values","state","[","'exp_avg'","]","=","torch",".","zeros_like","(","p",".","data",")","# Exponential moving average of squared gradient values","state","[","'exp_avg_sq'","]","=","torch",".","zeros_like","(","p",".","data",")","exp_avg",",","exp_avg_sq","=","state","[","'exp_avg'","]",",","state","[","'exp_avg_sq'","]","beta1",",","beta2","=","group","[","'betas'","]","state","[","'step'","]","+=","1","# Decay the first and second moment running average coefficient","# In-place operations to update the averages at the same time","exp_avg",".","mul_","(","beta1",")",".","add_","(","1.0","-","beta1",",","grad",")","exp_avg_sq",".","mul_","(","beta2",")",".","addcmul_","(","1.0","-","beta2",",","grad",",","grad",")","denom","=","exp_avg_sq",".","sqrt","(",")",".","add_","(","group","[","'eps'","]",")","step_size","=","group","[","'lr'","]","if","group","[","'correct_bias'","]",":","# No bias correction for Bert","bias_correction1","=","1.0","-","beta1","**","state","[","'step'","]","bias_correction2","=","1.0","-","beta2","**","state","[","'step'","]","step_size","=","step_size","*","math",".","sqrt","(","bias_correction2",")","\/","bias_correction1","p",".","data",".","addcdiv_","(","-","step_size",",","exp_avg",",","denom",")","# Just adding the square of the weights to the loss function is *not*","# the correct way of using L2 regularization\/weight decay with Adam,","# since that will interact with the m and v parameters in strange ways.","#","# Instead we want to decay the weights in a manner that doesn't interact","# with the m\/v parameters. 
This is equivalent to adding the square","# of the weights to the loss with plain (non-momentum) SGD.","# Add weight decay at the end (fixed version)","if","group","[","'weight_decay'","]",">","0.0",":","p",".","data",".","add_","(","-","group","[","'lr'","]","*","group","[","'weight_decay'","]",",","p",".","data",")","return","loss"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/optimization.py#L130-L189"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_ctrl.py","language":"python","identifier":"CTRLPreTrainedModel._init_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)","function_tokens":["def","_init_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",",","Conv1D",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","(","nn",".","Linear",",","Conv1D",")",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")","elif","isinstance","(","module",",","nn",".","LayerNorm",")",":","module",".","bias",".","data",".","zero_","(",")","module",".","weight",".","data",".","fill_","(","1.0",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_ctrl.py#L177-L188"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_xlnet.py","language":"python","identifier":"XLNetConfig.__init__","parameters":"(self,\n vocab_size_or_config_json_file=32000,\n d_model=1024,\n n_layer=24,\n n_head=16,\n d_inner=4096,\n max_position_embeddings=512,\n ff_activation=\"gelu\",\n untie_r=True,\n attn_type=\"bi\",\n\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n\n dropout=0.1,\n mem_len=None,\n reuse_len=None,\n bi_data=False,\n clamp_len=-1,\n same_length=False,\n\n finetuning_task=None,\n num_labels=2,\n summary_type='last',\n summary_use_proj=True,\n summary_activation='tanh',\n summary_last_dropout=0.1,\n start_n_top=5,\n end_n_top=5,\n **kwargs)","argument_list":"","return_statement":"","docstring":"Constructs XLNetConfig.","docstring_summary":"Constructs XLNetConfig.","docstring_tokens":["Constructs","XLNetConfig","."],"function":"def __init__(self,\n 
vocab_size_or_config_json_file=32000,\n d_model=1024,\n n_layer=24,\n n_head=16,\n d_inner=4096,\n max_position_embeddings=512,\n ff_activation=\"gelu\",\n untie_r=True,\n attn_type=\"bi\",\n\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n\n dropout=0.1,\n mem_len=None,\n reuse_len=None,\n bi_data=False,\n clamp_len=-1,\n same_length=False,\n\n finetuning_task=None,\n num_labels=2,\n summary_type='last',\n summary_use_proj=True,\n summary_activation='tanh',\n summary_last_dropout=0.1,\n start_n_top=5,\n end_n_top=5,\n **kwargs):\n \"\"\"Constructs XLNetConfig.\n \"\"\"\n super(XLNetConfig, self).__init__(**kwargs)\n\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n setattr(self, key, value)\n elif isinstance(vocab_size_or_config_json_file, int):\n self.n_token = vocab_size_or_config_json_file\n self.d_model = d_model\n self.n_layer = n_layer\n self.n_head = n_head\n assert d_model % n_head == 0\n self.d_head = d_model \/\/ n_head\n self.ff_activation = ff_activation\n self.d_inner = d_inner\n self.untie_r = untie_r\n self.attn_type = attn_type\n\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n\n self.dropout = dropout\n self.mem_len = mem_len\n self.reuse_len = reuse_len\n self.bi_data = bi_data\n self.clamp_len = clamp_len\n self.same_length = same_length\n\n self.finetuning_task = finetuning_task\n self.num_labels = num_labels\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_last_dropout = summary_last_dropout\n self.start_n_top = start_n_top\n self.end_n_top = end_n_top\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \" or the path to a pretrained model config file 
(str)\")","function_tokens":["def","__init__","(","self",",","vocab_size_or_config_json_file","=","32000",",","d_model","=","1024",",","n_layer","=","24",",","n_head","=","16",",","d_inner","=","4096",",","max_position_embeddings","=","512",",","ff_activation","=","\"gelu\"",",","untie_r","=","True",",","attn_type","=","\"bi\"",",","initializer_range","=","0.02",",","layer_norm_eps","=","1e-12",",","dropout","=","0.1",",","mem_len","=","None",",","reuse_len","=","None",",","bi_data","=","False",",","clamp_len","=","-","1",",","same_length","=","False",",","finetuning_task","=","None",",","num_labels","=","2",",","summary_type","=","'last'",",","summary_use_proj","=","True",",","summary_activation","=","'tanh'",",","summary_last_dropout","=","0.1",",","start_n_top","=","5",",","end_n_top","=","5",",","*","*","kwargs",")",":","super","(","XLNetConfig",",","self",")",".","__init__","(","*","*","kwargs",")","if","isinstance","(","vocab_size_or_config_json_file",",","str",")","or","(","sys",".","version_info","[","0","]","==","2","and","isinstance","(","vocab_size_or_config_json_file",",","unicode",")",")",":","with","open","(","vocab_size_or_config_json_file",",","\"r\"",",","encoding","=","'utf-8'",")","as","reader",":","json_config","=","json",".","loads","(","reader",".","read","(",")",")","for","key",",","value","in","json_config",".","items","(",")",":","setattr","(","self",",","key",",","value",")","elif","isinstance","(","vocab_size_or_config_json_file",",","int",")",":","self",".","n_token","=","vocab_size_or_config_json_file","self",".","d_model","=","d_model","self",".","n_layer","=","n_layer","self",".","n_head","=","n_head","assert","d_model","%","n_head","==","0","self",".","d_head","=","d_model","\/\/","n_head","self",".","ff_activation","=","ff_activation","self",".","d_inner","=","d_inner","self",".","untie_r","=","untie_r","self",".","attn_type","=","attn_type","self",".","initializer_range","=","initializer_range","self",".","layer_norm_eps","=","layer_norm_eps","self",".","dropout","=","dropout","self",".","mem_len","=","mem_len","self",".","reuse_len","=","reuse_len","self",".","bi_data","=","bi_data","self",".","clamp_len","=","clamp_len","self",".","same_length","=","same_length","self",".","finetuning_task","=","finetuning_task","self",".","num_labels","=","num_labels","self",".","summary_type","=","summary_type","self",".","summary_use_proj","=","summary_use_proj","self",".","summary_activation","=","summary_activation","self",".","summary_last_dropout","=","summary_last_dropout","self",".","start_n_top","=","start_n_top","self",".","end_n_top","=","end_n_top","else",":","raise","ValueError","(","\"First argument must be either a vocabulary size (int)\"","\" or the path to a pretrained model config file (str)\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_xlnet.py#L74-L146"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_auto.py","language":"python","identifier":"AutoTokenizer.from_pretrained","parameters":"(cls, pretrained_model_name_or_path, *inputs, **kwargs)","argument_list":"","return_statement":"","docstring":"r\"\"\" Instantiate one of the tokenizer classes of the library\n from a pre-trained model vocabulary.\n\n The tokenizer class to instantiate is selected as the first pattern matching\n in the 
`pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertTokenizer (DistilBert model)\n - contains `roberta`: RobertaTokenizer (XLM model)\n - contains `bert`: BertTokenizer (Bert model)\n - contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)\n - contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)\n - contains `ctrl`: CTRLTokenizer (Salesforce CTRL model)\n - contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)\n - contains `xlnet`: XLNetTokenizer (XLNet model)\n - contains `xlm`: XLMTokenizer (XLM model)\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``.\/my_model_directory\/``.\n - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``.\/my_model_directory\/vocab.txt``.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the vocabulary files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.\n\n kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.\n\n Examples::\n\n tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') # Download vocabulary from S3 and cache.\n tokenizer = AutoTokenizer.from_pretrained('.\/test\/bert_saved_model\/') # E.g. 
tokenizer was saved using `save_pretrained('.\/test\/saved_model\/')`","docstring_summary":"r\"\"\" Instantiate a one of the tokenizer classes of the library\n from a pre-trained model vocabulary.","docstring_tokens":["r","Instantiate","a","one","of","the","tokenizer","classes","of","the","library","from","a","pre","-","trained","model","vocabulary","."],"function":"def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):\n r\"\"\" Instantiate a one of the tokenizer classes of the library\n from a pre-trained model vocabulary.\n\n The tokenizer class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertTokenizer (DistilBert model)\n - contains `roberta`: RobertaTokenizer (XLM model)\n - contains `bert`: BertTokenizer (Bert model)\n - contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)\n - contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)\n - contains `ctrl`: CTRLTokenizer (Salesforce CTRL model)\n - contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)\n - contains `xlnet`: XLNetTokenizer (XLNet model)\n - contains `xlm`: XLMTokenizer (XLM model)\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``.\/my_model_directory\/``.\n - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``.\/my_model_directory\/vocab.txt``.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the vocabulary files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.\n\n kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.\n\n Examples::\n\n tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') # Download vocabulary from S3 and cache.\n tokenizer = AutoTokenizer.from_pretrained('.\/test\/bert_saved_model\/') # E.g. 
tokenizer was saved using `save_pretrained('.\/test\/saved_model\/')`\n\n \"\"\"\n if 'distilbert' in pretrained_model_name_or_path:\n return DistilBertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif 'roberta' in pretrained_model_name_or_path:\n return RobertaTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif 'openai-gpt' in pretrained_model_name_or_path:\n return OpenAIGPTTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif 'gpt2' in pretrained_model_name_or_path:\n return GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif 'transfo-xl' in pretrained_model_name_or_path:\n return TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif 'ctrl' in pretrained_model_name_or_path:\n return CTRLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n raise ValueError(\"Unrecognized model identifier in {}. Should contain one of \"\n \"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"\n \"'xlm', 'roberta', 'ctrl'\".format(pretrained_model_name_or_path))","function_tokens":["def","from_pretrained","(","cls",",","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")",":","if","'distilbert'","in","pretrained_model_name_or_path",":","return","DistilBertTokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","elif","'roberta'","in","pretrained_model_name_or_path",":","return","RobertaTokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","elif","'bert'","in","pretrained_model_name_or_path",":","return","BertTokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","elif","'openai-gpt'","in","pretrained_model_name_or_path",":","return","OpenAIGPTTokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","elif","'gpt2'","in","pretrained_model_name_or_path",":","return","GPT2Tokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","elif","'transfo-xl'","in","pretrained_model_name_or_path",":","return","TransfoXLTokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","elif","'xlnet'","in","pretrained_model_name_or_path",":","return","XLNetTokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","elif","'xlm'","in","pretrained_model_name_or_path",":","return","XLMTokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","elif","'ctrl'","in","pretrained_model_name_or_path",":","return","CTRLTokenizer",".","from_pretrained","(","pretrained_model_name_or_path",",","*","inputs",",","*","*","kwargs",")","raise","ValueError","(","\"Unrecognized model identifier in {}. 
Should contain one of \"","\"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"","\"'xlm', 'roberta', 'ctrl'\"",".","format","(","pretrained_model_name_or_path",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/tokenization_auto.py#L61-L124"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_gpt2.py","language":"python","identifier":"GPT2Config.__init__","parameters":"(\n self,\n vocab_size_or_config_json_file=50257,\n n_positions=1024,\n n_ctx=1024,\n n_embd=768,\n n_layer=12,\n n_head=12,\n resid_pdrop=0.1,\n embd_pdrop=0.1,\n attn_pdrop=0.1,\n layer_norm_epsilon=1e-5,\n initializer_range=0.02,\n\n num_labels=1,\n summary_type='cls_index',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n **kwargs\n )","argument_list":"","return_statement":"","docstring":"Constructs GPT2Config.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.\n n_positions: Number of positional embeddings.\n n_ctx: Size of the causal mask (usually same as n_positions).\n n_embd: Dimensionality of the embeddings and hidden states.\n n_layer: Number of hidden layers in the Transformer encoder.\n n_head: Number of attention heads for each attention layer in\n the Transformer encoder.\n layer_norm_epsilon: epsilon to use in the layer norm layers\n resid_pdrop: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attn_pdrop: The dropout ratio for the attention\n probabilities.\n embd_pdrop: The dropout ratio for the embeddings.\n initializer_range: The stddev of the truncated_normal_initializer for\n initializing all weight matrices.","docstring_summary":"Constructs GPT2Config.","docstring_tokens":["Constructs","GPT2Config","."],"function":"def __init__(\n self,\n vocab_size_or_config_json_file=50257,\n n_positions=1024,\n n_ctx=1024,\n n_embd=768,\n n_layer=12,\n n_head=12,\n resid_pdrop=0.1,\n embd_pdrop=0.1,\n attn_pdrop=0.1,\n layer_norm_epsilon=1e-5,\n initializer_range=0.02,\n\n num_labels=1,\n summary_type='cls_index',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n **kwargs\n ):\n \"\"\"Constructs GPT2Config.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file.\n n_positions: Number of positional embeddings.\n n_ctx: Size of the causal mask (usually same as n_positions).\n n_embd: Dimensionality of the embeddings and hidden states.\n n_layer: Number of hidden layers in the Transformer encoder.\n n_head: Number of attention heads for each attention layer in\n the Transformer encoder.\n layer_norm_epsilon: epsilon to use in the layer norm layers\n resid_pdrop: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attn_pdrop: The dropout ratio for the attention\n probabilities.\n embd_pdrop: The dropout ratio for the embeddings.\n initializer_range: The stddev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n super(GPT2Config, self).__init__(**kwargs)\n\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n 
with open(vocab_size_or_config_json_file, \"r\", encoding=\"utf-8\") as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.n_ctx = n_ctx\n self.n_positions = n_positions\n self.n_embd = n_embd\n self.n_layer = n_layer\n self.n_head = n_head\n self.resid_pdrop = resid_pdrop\n self.embd_pdrop = embd_pdrop\n self.attn_pdrop = attn_pdrop\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n\n self.num_labels = num_labels\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_first_dropout = summary_first_dropout\n self.summary_proj_to_labels = summary_proj_to_labels\n else:\n raise ValueError(\n \"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\"\n )","function_tokens":["def","__init__","(","self",",","vocab_size_or_config_json_file","=","50257",",","n_positions","=","1024",",","n_ctx","=","1024",",","n_embd","=","768",",","n_layer","=","12",",","n_head","=","12",",","resid_pdrop","=","0.1",",","embd_pdrop","=","0.1",",","attn_pdrop","=","0.1",",","layer_norm_epsilon","=","1e-5",",","initializer_range","=","0.02",",","num_labels","=","1",",","summary_type","=","'cls_index'",",","summary_use_proj","=","True",",","summary_activation","=","None",",","summary_proj_to_labels","=","True",",","summary_first_dropout","=","0.1",",","*","*","kwargs",")",":","super","(","GPT2Config",",","self",")",".","__init__","(","*","*","kwargs",")","if","isinstance","(","vocab_size_or_config_json_file",",","str",")","or","(","sys",".","version_info","[","0","]","==","2","and","isinstance","(","vocab_size_or_config_json_file",",","unicode",")",")",":","with","open","(","vocab_size_or_config_json_file",",","\"r\"",",","encoding","=","\"utf-8\"",")","as","reader",":","json_config","=","json",".","loads","(","reader",".","read","(",")",")","for","key",",","value","in","json_config",".","items","(",")",":","self",".","__dict__","[","key","]","=","value","elif","isinstance","(","vocab_size_or_config_json_file",",","int",")",":","self",".","vocab_size","=","vocab_size_or_config_json_file","self",".","n_ctx","=","n_ctx","self",".","n_positions","=","n_positions","self",".","n_embd","=","n_embd","self",".","n_layer","=","n_layer","self",".","n_head","=","n_head","self",".","resid_pdrop","=","resid_pdrop","self",".","embd_pdrop","=","embd_pdrop","self",".","attn_pdrop","=","attn_pdrop","self",".","layer_norm_epsilon","=","layer_norm_epsilon","self",".","initializer_range","=","initializer_range","self",".","num_labels","=","num_labels","self",".","summary_type","=","summary_type","self",".","summary_use_proj","=","summary_use_proj","self",".","summary_activation","=","summary_activation","self",".","summary_first_dropout","=","summary_first_dropout","self",".","summary_proj_to_labels","=","summary_proj_to_labels","else",":","raise","ValueError","(","\"First argument must be either a vocabulary size (int)\"","\"or the path to a pretrained model config file (str)\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_gpt2.py#L56-L128"} 
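The `from_pretrained` dispatch captured above matches on plain substring containment, so the order of the checks is load-bearing: 'distilbert' and 'roberta' must be tested before 'bert', since 'bert' is a substring of both names. A minimal sketch of that pattern, with placeholder strings standing in for the real tokenizer classes (only the rule ordering is the point):

    # Sketch of the substring dispatch in AutoTokenizer.from_pretrained above.
    # The right-hand names are placeholders, not the real classes.
    ORDERED_RULES = [
        ("distilbert", "DistilBertTokenizer"),  # must precede "bert"
        ("roberta", "RobertaTokenizer"),        # "roberta" contains "bert", so it must precede it too
        ("bert", "BertTokenizer"),
        ("openai-gpt", "OpenAIGPTTokenizer"),
        ("gpt2", "GPT2Tokenizer"),
        ("transfo-xl", "TransfoXLTokenizer"),
        ("xlnet", "XLNetTokenizer"),
        ("xlm", "XLMTokenizer"),
        ("ctrl", "CTRLTokenizer"),
    ]

    def resolve_tokenizer_class(pretrained_model_name_or_path):
        for pattern, tokenizer_cls in ORDERED_RULES:
            if pattern in pretrained_model_name_or_path:
                return tokenizer_cls
        raise ValueError("Unrecognized model identifier in %s" % pretrained_model_name_or_path)

    assert resolve_tokenizer_class("distilbert-base-uncased") == "DistilBertTokenizer"
    assert resolve_tokenizer_class("roberta-large") == "RobertaTokenizer"
    assert resolve_tokenizer_class("bert-base-chinese") == "BertTokenizer"

Reordering the rules so that 'bert' came first would silently misroute every DistilBERT and RoBERTa checkpoint name, which is why the captured implementation keeps the more specific patterns first.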
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py","language":"python","identifier":"Embeddings.forward","parameters":"(self, input_ids)","argument_list":"","return_statement":"return embeddings","docstring":"Parameters\n ----------\n input_ids: torch.tensor(bs, max_seq_length)\n The token ids to embed.\n\n Outputs\n -------\n embeddings: torch.tensor(bs, max_seq_length, dim)\n The embedded tokens (plus position embeddings, no token_type embeddings)","docstring_summary":"Parameters\n ----------\n input_ids: torch.tensor(bs, max_seq_length)\n The token ids to embed.","docstring_tokens":["Parameters","----------","input_ids",":","torch",".","tensor","(","bs","max_seq_length",")","The","token","ids","to","embed","."],"function":"def forward(self, input_ids):\n \"\"\"\n Parameters\n ----------\n input_ids: torch.tensor(bs, max_seq_length)\n The token ids to embed.\n\n Outputs\n -------\n embeddings: torch.tensor(bs, max_seq_length, dim)\n The embedded tokens (plus position embeddings, no token_type embeddings)\n \"\"\"\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)\n\n word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)\n position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)\n\n embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim)\n embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)\n embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)\n return embeddings","function_tokens":["def","forward","(","self",",","input_ids",")",":","seq_length","=","input_ids",".","size","(","1",")","position_ids","=","torch",".","arange","(","seq_length",",","dtype","=","torch",".","long",",","device","=","input_ids",".","device",")","# (max_seq_length)","position_ids","=","position_ids",".","unsqueeze","(","0",")",".","expand_as","(","input_ids",")","# (bs, max_seq_length)","word_embeddings","=","self",".","word_embeddings","(","input_ids",")","# (bs, max_seq_length, dim)","position_embeddings","=","self",".","position_embeddings","(","position_ids",")","# (bs, max_seq_length, dim)","embeddings","=","word_embeddings","+","position_embeddings","# (bs, max_seq_length, dim)","embeddings","=","self",".","LayerNorm","(","embeddings",")","# (bs, max_seq_length, dim)","embeddings","=","self",".","dropout","(","embeddings",")","# (bs, max_seq_length, dim)","return","embeddings"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py#L76-L98"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py","language":"python","identifier":"MultiHeadSelfAttention.forward","parameters":"(self, query, key, value, mask, head_mask = None)","argument_list":"","return_statement":"","docstring":"Parameters\n ----------\n query: torch.tensor(bs, seq_length, dim)\n key: torch.tensor(bs, seq_length, dim)\n value: torch.tensor(bs, seq_length, dim)\n mask: torch.tensor(bs, seq_length)\n\n Outputs\n -------\n weights: torch.tensor(bs, n_heads, seq_length, seq_length)\n Attention weights\n context: 
torch.tensor(bs, seq_length, dim)\n Contextualized layer. Optional: only if `output_attentions=True`","docstring_summary":"Parameters\n ----------\n query: torch.tensor(bs, seq_length, dim)\n key: torch.tensor(bs, seq_length, dim)\n value: torch.tensor(bs, seq_length, dim)\n mask: torch.tensor(bs, seq_length)","docstring_tokens":["Parameters","----------","query",":","torch",".","tensor","(","bs","seq_length","dim",")","key",":","torch",".","tensor","(","bs","seq_length","dim",")","value",":","torch",".","tensor","(","bs","seq_length","dim",")","mask",":","torch",".","tensor","(","bs","seq_length",")"],"function":"def forward(self, query, key, value, mask, head_mask = None):\n \"\"\"\n Parameters\n ----------\n query: torch.tensor(bs, seq_length, dim)\n key: torch.tensor(bs, seq_length, dim)\n value: torch.tensor(bs, seq_length, dim)\n mask: torch.tensor(bs, seq_length)\n\n Outputs\n -------\n weights: torch.tensor(bs, n_heads, seq_length, seq_length)\n Attention weights\n context: torch.tensor(bs, seq_length, dim)\n Contextualized layer. Optional: only if `output_attentions=True`\n \"\"\"\n bs, q_length, dim = query.size()\n k_length = key.size(1)\n # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)\n # assert key.size() == value.size()\n\n dim_per_head = self.dim \/\/ self.n_heads\n\n mask_reshp = (bs, 1, 1, k_length)\n\n def shape(x):\n \"\"\" separate heads \"\"\"\n return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)\n\n def unshape(x):\n \"\"\" group heads \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)\n\n q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)\n k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)\n v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)\n\n q = q \/ math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)\n scores = torch.matmul(q, k.transpose(2,3)) # (bs, n_heads, q_length, k_length)\n mask = (mask==0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)\n scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)\n\n weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length)\n weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)\n context = unshape(context) # (bs, q_length, dim)\n context = self.out_lin(context) # (bs, q_length, dim)\n\n if self.output_attentions:\n return (context, weights)\n else:\n return (context,)","function_tokens":["def","forward","(","self",",","query",",","key",",","value",",","mask",",","head_mask","=","None",")",":","bs",",","q_length",",","dim","=","query",".","size","(",")","k_length","=","key",".","size","(","1",")","# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)","# assert key.size() == value.size()","dim_per_head","=","self",".","dim","\/\/","self",".","n_heads","mask_reshp","=","(","bs",",","1",",","1",",","k_length",")","def","shape","(","x",")",":","\"\"\" separate heads \"\"\"","return","x",".","view","(","bs",",","-","1",",","self",".","n_heads",",","dim_per_head",")",".","transpose","(","1",",","2",")","def","unshape","(","x",")",":","\"\"\" group heads 
\"\"\"","return","x",".","transpose","(","1",",","2",")",".","contiguous","(",")",".","view","(","bs",",","-","1",",","self",".","n_heads","*","dim_per_head",")","q","=","shape","(","self",".","q_lin","(","query",")",")","# (bs, n_heads, q_length, dim_per_head)","k","=","shape","(","self",".","k_lin","(","key",")",")","# (bs, n_heads, k_length, dim_per_head)","v","=","shape","(","self",".","v_lin","(","value",")",")","# (bs, n_heads, k_length, dim_per_head)","q","=","q","\/","math",".","sqrt","(","dim_per_head",")","# (bs, n_heads, q_length, dim_per_head)","scores","=","torch",".","matmul","(","q",",","k",".","transpose","(","2",",","3",")",")","# (bs, n_heads, q_length, k_length)","mask","=","(","mask","==","0",")",".","view","(","mask_reshp",")",".","expand_as","(","scores",")","# (bs, n_heads, q_length, k_length)","scores",".","masked_fill_","(","mask",",","-","float","(","'inf'",")",")","# (bs, n_heads, q_length, k_length)","weights","=","nn",".","Softmax","(","dim","=","-","1",")","(","scores",")","# (bs, n_heads, q_length, k_length)","weights","=","self",".","dropout","(","weights",")","# (bs, n_heads, q_length, k_length)","# Mask heads if we want to","if","head_mask","is","not","None",":","weights","=","weights","*","head_mask","context","=","torch",".","matmul","(","weights",",","v",")","# (bs, n_heads, q_length, dim_per_head)","context","=","unshape","(","context",")","# (bs, q_length, dim)","context","=","self",".","out_lin","(","context",")","# (bs, q_length, dim)","if","self",".","output_attentions",":","return","(","context",",","weights",")","else",":","return","(","context",",",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py#L139-L195"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py","language":"python","identifier":"TransformerBlock.forward","parameters":"(self, x, attn_mask=None, head_mask=None)","argument_list":"","return_statement":"return output","docstring":"Parameters\n ----------\n x: torch.tensor(bs, seq_length, dim)\n attn_mask: torch.tensor(bs, seq_length)\n\n Outputs\n -------\n sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length)\n The attention weights\n ffn_output: torch.tensor(bs, seq_length, dim)\n The output of the transformer block contextualization.","docstring_summary":"Parameters\n ----------\n x: torch.tensor(bs, seq_length, dim)\n attn_mask: torch.tensor(bs, seq_length)","docstring_tokens":["Parameters","----------","x",":","torch",".","tensor","(","bs","seq_length","dim",")","attn_mask",":","torch",".","tensor","(","bs","seq_length",")"],"function":"def forward(self, x, attn_mask=None, head_mask=None):\n \"\"\"\n Parameters\n ----------\n x: torch.tensor(bs, seq_length, dim)\n attn_mask: torch.tensor(bs, seq_length)\n\n Outputs\n -------\n sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length)\n The attention weights\n ffn_output: torch.tensor(bs, seq_length, dim)\n The output of the transformer block contextualization.\n \"\"\"\n # Self-Attention\n sa_output = self.attention(query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask)\n if self.output_attentions:\n sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)\n else: # To handle these `output_attention` or `output_hidden_states` cases returning tuples\n assert 
type(sa_output) == tuple\n sa_output = sa_output[0]\n sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)\n\n # Feed Forward Network\n ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)\n ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)\n\n output = (ffn_output,)\n if self.output_attentions:\n output = (sa_weights,) + output\n return output","function_tokens":["def","forward","(","self",",","x",",","attn_mask","=","None",",","head_mask","=","None",")",":","# Self-Attention","sa_output","=","self",".","attention","(","query","=","x",",","key","=","x",",","value","=","x",",","mask","=","attn_mask",",","head_mask","=","head_mask",")","if","self",".","output_attentions",":","sa_output",",","sa_weights","=","sa_output","# (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)","else",":","# To handle these `output_attention` or `output_hidden_states` cases returning tuples","assert","type","(","sa_output",")","==","tuple","sa_output","=","sa_output","[","0","]","sa_output","=","self",".","sa_layer_norm","(","sa_output","+","x",")","# (bs, seq_length, dim)","# Feed Forward Network","ffn_output","=","self",".","ffn","(","sa_output",")","# (bs, seq_length, dim)","ffn_output","=","self",".","output_layer_norm","(","ffn_output","+","sa_output",")","# (bs, seq_length, dim)","output","=","(","ffn_output",",",")","if","self",".","output_attentions",":","output","=","(","sa_weights",",",")","+","output","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py#L232-L262"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py","language":"python","identifier":"Transformer.forward","parameters":"(self, x, attn_mask=None, head_mask=None)","argument_list":"","return_statement":"return outputs","docstring":"Parameters\n ----------\n x: torch.tensor(bs, seq_length, dim)\n Input sequence embedded.\n attn_mask: torch.tensor(bs, seq_length)\n Attention mask on the sequence.\n\n Outputs\n -------\n hidden_state: torch.tensor(bs, seq_length, dim)\n Sequence of hiddens states in the last (top) layer\n all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]\n Tuple of length n_layers with the hidden states from each layer.\n Optional: only if output_hidden_states=True\n all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]\n Tuple of length n_layers with the attention weights from each layer\n Optional: only if output_attentions=True","docstring_summary":"Parameters\n ----------\n x: torch.tensor(bs, seq_length, dim)\n Input sequence embedded.\n attn_mask: torch.tensor(bs, seq_length)\n Attention mask on the sequence.","docstring_tokens":["Parameters","----------","x",":","torch",".","tensor","(","bs","seq_length","dim",")","Input","sequence","embedded",".","attn_mask",":","torch",".","tensor","(","bs","seq_length",")","Attention","mask","on","the","sequence","."],"function":"def forward(self, x, attn_mask=None, head_mask=None):\n \"\"\"\n Parameters\n ----------\n x: torch.tensor(bs, seq_length, dim)\n Input sequence embedded.\n attn_mask: torch.tensor(bs, seq_length)\n Attention mask on the sequence.\n\n Outputs\n -------\n hidden_state: torch.tensor(bs, seq_length, dim)\n Sequence of hiddens states in the last (top) layer\n all_hidden_states: Tuple[torch.tensor(bs, 
seq_length, dim)]\n Tuple of length n_layers with the hidden states from each layer.\n Optional: only if output_hidden_states=True\n all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]\n Tuple of length n_layers with the attention weights from each layer\n Optional: only if output_attentions=True\n \"\"\"\n all_hidden_states = ()\n all_attentions = ()\n\n hidden_state = x\n for i, layer_module in enumerate(self.layer):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_state,)\n\n layer_outputs = layer_module(x=hidden_state,\n attn_mask=attn_mask,\n head_mask=head_mask[i])\n hidden_state = layer_outputs[-1]\n\n if self.output_attentions:\n assert len(layer_outputs) == 2\n attentions = layer_outputs[0]\n all_attentions = all_attentions + (attentions,)\n else:\n assert len(layer_outputs) == 1\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_state,)\n\n outputs = (hidden_state,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs","function_tokens":["def","forward","(","self",",","x",",","attn_mask","=","None",",","head_mask","=","None",")",":","all_hidden_states","=","(",")","all_attentions","=","(",")","hidden_state","=","x","for","i",",","layer_module","in","enumerate","(","self",".","layer",")",":","if","self",".","output_hidden_states",":","all_hidden_states","=","all_hidden_states","+","(","hidden_state",",",")","layer_outputs","=","layer_module","(","x","=","hidden_state",",","attn_mask","=","attn_mask",",","head_mask","=","head_mask","[","i","]",")","hidden_state","=","layer_outputs","[","-","1","]","if","self",".","output_attentions",":","assert","len","(","layer_outputs",")","==","2","attentions","=","layer_outputs","[","0","]","all_attentions","=","all_attentions","+","(","attentions",",",")","else",":","assert","len","(","layer_outputs",")","==","1","# Add last layer","if","self",".","output_hidden_states",":","all_hidden_states","=","all_hidden_states","+","(","hidden_state",",",")","outputs","=","(","hidden_state",",",")","if","self",".","output_hidden_states",":","outputs","=","outputs","+","(","all_hidden_states",",",")","if","self",".","output_attentions",":","outputs","=","outputs","+","(","all_attentions",",",")","return","outputs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py#L275-L324"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py","language":"python","identifier":"DistilBertPreTrainedModel._init_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, nn.Embedding):\n if module.weight.requires_grad:\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not 
None:\n module.bias.data.zero_()","function_tokens":["def","_init_weights","(","self",",","module",")",":","if","isinstance","(","module",",","nn",".","Embedding",")",":","if","module",".","weight",".","requires_grad",":","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","nn",".","Linear",")",":","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","elif","isinstance","(","module",",","nn",".","LayerNorm",")",":","module",".","bias",".","data",".","zero_","(",")","module",".","weight",".","data",".","fill_","(","1.0",")","if","isinstance","(","module",",","nn",".","Linear",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_distilbert.py#L340-L352"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_gpt2.py","language":"python","identifier":"load_tf_weights_in_gpt2","parameters":"(model, config, gpt2_checkpoint_path)","argument_list":"","return_statement":"return model","docstring":"Load tf checkpoints in a pytorch model","docstring_summary":"Load tf checkpoints in a pytorch model","docstring_tokens":["Load","tf","checkpoints","in","a","pytorch","model"],"function":"def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(gpt2_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array.squeeze())\n\n for name, array in zip(names, arrays):\n name = name[6:] # skip \"model\/\"\n name = name.split('\/')\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+\\d+', m_name):\n l = re.split(r'(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'w' or l[0] == 'g':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'b':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'wpe' or l[0] == 'wte':\n pointer = getattr(pointer, l[0])\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model","function_tokens":["def","load_tf_weights_in_gpt2","(","model",",","config",",","gpt2_checkpoint_path",")",":","try",":","import","re","import","numpy","as","np","import","tensorflow","as","tf","except","ImportError",":","logger",".","error","(","\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"","\"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\"",")","raise","tf_path","=","os",".","path",".","abspath","(","gpt2_checkpoint_path",")","logger",".","info","(","\"Converting TensorFlow checkpoint from {}\"",".","format","(","tf_path",")",")","# Load weights from TF model","init_vars","=","tf",".","train",".","list_variables","(","tf_path",")","names","=","[","]","arrays","=","[","]","for","name",",","shape","in","init_vars",":","logger",".","info","(","\"Loading TF weight {} with shape {}\"",".","format","(","name",",","shape",")",")","array","=","tf",".","train",".","load_variable","(","tf_path",",","name",")","names",".","append","(","name",")","arrays",".","append","(","array",".","squeeze","(",")",")","for","name",",","array","in","zip","(","names",",","arrays",")",":","name","=","name","[","6",":","]","# skip 
\"model\/\"","name","=","name",".","split","(","'\/'",")","pointer","=","model","for","m_name","in","name",":","if","re",".","fullmatch","(","r'[A-Za-z]+\\d+'",",","m_name",")",":","l","=","re",".","split","(","r'(\\d+)'",",","m_name",")","else",":","l","=","[","m_name","]","if","l","[","0","]","==","'w'","or","l","[","0","]","==","'g'",":","pointer","=","getattr","(","pointer",",","'weight'",")","elif","l","[","0","]","==","'b'",":","pointer","=","getattr","(","pointer",",","'bias'",")","elif","l","[","0","]","==","'wpe'","or","l","[","0","]","==","'wte'",":","pointer","=","getattr","(","pointer",",","l","[","0","]",")","pointer","=","getattr","(","pointer",",","'weight'",")","else",":","pointer","=","getattr","(","pointer",",","l","[","0","]",")","if","len","(","l",")",">=","2",":","num","=","int","(","l","[","1","]",")","pointer","=","pointer","[","num","]","try",":","assert","pointer",".","shape","==","array",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","pointer",".","shape",",","array",".","shape",")","raise","logger",".","info","(","\"Initialize PyTorch weight {}\"",".","format","(","name",")",")","pointer",".","data","=","torch",".","from_numpy","(","array",")","return","model"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_gpt2.py#L44-L95"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_gpt2.py","language":"python","identifier":"GPT2PreTrainedModel._init_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)","function_tokens":["def","_init_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",",","Conv1D",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","(","nn",".","Linear",",","Conv1D",")",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")","elif","isinstance","(","module",",","nn",".","LayerNorm",")",":","module",".","bias",".","data",".","zero_","(",")","module",".","weight",".","data",".","fill_","(","1.0",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_gpt2.py#L254-L265"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py","language":"python","identifier":"build_tf_xlnet_to_pytorch_map","parameters":"(model, config, tf_weights=None)","argument_list":"","return_statement":"return tf_to_pt_map","docstring":"A map of modules from TF to PyTorch.\n I use a map to keep the PyTorch model as\n identical to the original PyTorch model as possible.","docstring_summary":"A map of modules from TF to PyTorch.\n I use a map to keep the PyTorch model as\n identical to the original PyTorch model as possible.","docstring_tokens":["A","map","of","modules","from","TF","to","PyTorch",".","I","use","a","map","to","keep","the","PyTorch","model","as","identical","to","the","original","PyTorch","model","as","possible","."],"function":"def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):\n \"\"\" A map of modules from TF to PyTorch.\n I use a map to keep the PyTorch model as\n identical to the original PyTorch model as possible.\n \"\"\"\n\n tf_to_pt_map = {}\n\n if hasattr(model, 'transformer'):\n if hasattr(model, 'lm_loss'):\n # We will load also the output bias\n tf_to_pt_map['model\/lm_loss\/bias'] = model.lm_loss.bias\n if hasattr(model, 'sequence_summary') and 'model\/sequnece_summary\/summary\/kernel' in tf_weights:\n # We will load also the sequence summary\n tf_to_pt_map['model\/sequnece_summary\/summary\/kernel'] = model.sequence_summary.summary.weight\n tf_to_pt_map['model\/sequnece_summary\/summary\/bias'] = model.sequence_summary.summary.bias\n if hasattr(model, 'logits_proj') and config.finetuning_task is not None \\\n and 'model\/regression_{}\/logit\/kernel'.format(config.finetuning_task) in tf_weights:\n tf_to_pt_map['model\/regression_{}\/logit\/kernel'.format(config.finetuning_task)] = model.logits_proj.weight\n tf_to_pt_map['model\/regression_{}\/logit\/bias'.format(config.finetuning_task)] = model.logits_proj.bias\n\n # Now load the rest of the transformer\n model = model.transformer\n\n # Embeddings and output\n tf_to_pt_map.update({'model\/transformer\/word_embedding\/lookup_table': model.word_embedding.weight,\n 'model\/transformer\/mask_emb\/mask_emb': model.mask_emb})\n\n # Transformer blocks\n for i, b in enumerate(model.layer):\n layer_str = \"model\/transformer\/layer_%d\/\" % i\n tf_to_pt_map.update({\n layer_str + \"rel_attn\/LayerNorm\/gamma\": b.rel_attn.layer_norm.weight,\n layer_str + \"rel_attn\/LayerNorm\/beta\": b.rel_attn.layer_norm.bias,\n layer_str + \"rel_attn\/o\/kernel\": b.rel_attn.o,\n layer_str + \"rel_attn\/q\/kernel\": b.rel_attn.q,\n layer_str + \"rel_attn\/k\/kernel\": b.rel_attn.k,\n layer_str + \"rel_attn\/r\/kernel\": b.rel_attn.r,\n layer_str + \"rel_attn\/v\/kernel\": b.rel_attn.v,\n layer_str + \"ff\/LayerNorm\/gamma\": b.ff.layer_norm.weight,\n layer_str + \"ff\/LayerNorm\/beta\": b.ff.layer_norm.bias,\n layer_str + \"ff\/layer_1\/kernel\": b.ff.layer_1.weight,\n layer_str + \"ff\/layer_1\/bias\": b.ff.layer_1.bias,\n layer_str + \"ff\/layer_2\/kernel\": b.ff.layer_2.weight,\n layer_str + \"ff\/layer_2\/bias\": b.ff.layer_2.bias,\n })\n\n # Relative positioning biases\n if config.untie_r:\n r_r_list = []\n r_w_list = []\n r_s_list = []\n seg_embed_list = []\n for b in model.layer:\n r_r_list.append(b.rel_attn.r_r_bias)\n r_w_list.append(b.rel_attn.r_w_bias)\n r_s_list.append(b.rel_attn.r_s_bias)\n seg_embed_list.append(b.rel_attn.seg_embed)\n else:\n r_r_list = [model.r_r_bias]\n r_w_list = 
[model.r_w_bias]\n r_s_list = [model.r_s_bias]\n seg_embed_list = [model.seg_embed]\n tf_to_pt_map.update({\n 'model\/transformer\/r_r_bias': r_r_list,\n 'model\/transformer\/r_w_bias': r_w_list,\n 'model\/transformer\/r_s_bias': r_s_list,\n 'model\/transformer\/seg_embed': seg_embed_list})\n return tf_to_pt_map","function_tokens":["def","build_tf_xlnet_to_pytorch_map","(","model",",","config",",","tf_weights","=","None",")",":","tf_to_pt_map","=","{","}","if","hasattr","(","model",",","'transformer'",")",":","if","hasattr","(","model",",","'lm_loss'",")",":","# We will load also the output bias","tf_to_pt_map","[","'model\/lm_loss\/bias'","]","=","model",".","lm_loss",".","bias","if","hasattr","(","model",",","'sequence_summary'",")","and","'model\/sequnece_summary\/summary\/kernel'","in","tf_weights",":","# We will load also the sequence summary","tf_to_pt_map","[","'model\/sequnece_summary\/summary\/kernel'","]","=","model",".","sequence_summary",".","summary",".","weight","tf_to_pt_map","[","'model\/sequnece_summary\/summary\/bias'","]","=","model",".","sequence_summary",".","summary",".","bias","if","hasattr","(","model",",","'logits_proj'",")","and","config",".","finetuning_task","is","not","None","and","'model\/regression_{}\/logit\/kernel'",".","format","(","config",".","finetuning_task",")","in","tf_weights",":","tf_to_pt_map","[","'model\/regression_{}\/logit\/kernel'",".","format","(","config",".","finetuning_task",")","]","=","model",".","logits_proj",".","weight","tf_to_pt_map","[","'model\/regression_{}\/logit\/bias'",".","format","(","config",".","finetuning_task",")","]","=","model",".","logits_proj",".","bias","# Now load the rest of the transformer","model","=","model",".","transformer","# Embeddings and output","tf_to_pt_map",".","update","(","{","'model\/transformer\/word_embedding\/lookup_table'",":","model",".","word_embedding",".","weight",",","'model\/transformer\/mask_emb\/mask_emb'",":","model",".","mask_emb","}",")","# Transformer blocks","for","i",",","b","in","enumerate","(","model",".","layer",")",":","layer_str","=","\"model\/transformer\/layer_%d\/\"","%","i","tf_to_pt_map",".","update","(","{","layer_str","+","\"rel_attn\/LayerNorm\/gamma\"",":","b",".","rel_attn",".","layer_norm",".","weight",",","layer_str","+","\"rel_attn\/LayerNorm\/beta\"",":","b",".","rel_attn",".","layer_norm",".","bias",",","layer_str","+","\"rel_attn\/o\/kernel\"",":","b",".","rel_attn",".","o",",","layer_str","+","\"rel_attn\/q\/kernel\"",":","b",".","rel_attn",".","q",",","layer_str","+","\"rel_attn\/k\/kernel\"",":","b",".","rel_attn",".","k",",","layer_str","+","\"rel_attn\/r\/kernel\"",":","b",".","rel_attn",".","r",",","layer_str","+","\"rel_attn\/v\/kernel\"",":","b",".","rel_attn",".","v",",","layer_str","+","\"ff\/LayerNorm\/gamma\"",":","b",".","ff",".","layer_norm",".","weight",",","layer_str","+","\"ff\/LayerNorm\/beta\"",":","b",".","ff",".","layer_norm",".","bias",",","layer_str","+","\"ff\/layer_1\/kernel\"",":","b",".","ff",".","layer_1",".","weight",",","layer_str","+","\"ff\/layer_1\/bias\"",":","b",".","ff",".","layer_1",".","bias",",","layer_str","+","\"ff\/layer_2\/kernel\"",":","b",".","ff",".","layer_2",".","weight",",","layer_str","+","\"ff\/layer_2\/bias\"",":","b",".","ff",".","layer_2",".","bias",",","}",")","# Relative positioning 
biases","if","config",".","untie_r",":","r_r_list","=","[","]","r_w_list","=","[","]","r_s_list","=","[","]","seg_embed_list","=","[","]","for","b","in","model",".","layer",":","r_r_list",".","append","(","b",".","rel_attn",".","r_r_bias",")","r_w_list",".","append","(","b",".","rel_attn",".","r_w_bias",")","r_s_list",".","append","(","b",".","rel_attn",".","r_s_bias",")","seg_embed_list",".","append","(","b",".","rel_attn",".","seg_embed",")","else",":","r_r_list","=","[","model",".","r_r_bias","]","r_w_list","=","[","model",".","r_w_bias","]","r_s_list","=","[","model",".","r_s_bias","]","seg_embed_list","=","[","model",".","seg_embed","]","tf_to_pt_map",".","update","(","{","'model\/transformer\/r_r_bias'",":","r_r_list",",","'model\/transformer\/r_w_bias'",":","r_w_list",",","'model\/transformer\/r_s_bias'",":","r_s_list",",","'model\/transformer\/seg_embed'",":","seg_embed_list","}",")","return","tf_to_pt_map"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py#L45-L113"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py","language":"python","identifier":"load_tf_weights_in_xlnet","parameters":"(model, config, tf_path)","argument_list":"","return_statement":"return model","docstring":"Load tf checkpoints in a pytorch model","docstring_summary":"Load tf checkpoints in a pytorch model","docstring_tokens":["Load","tf","checkpoints","in","a","pytorch","model"],"function":"def load_tf_weights_in_xlnet(model, config, tf_path):\n \"\"\" Load tf checkpoints in a pytorch model\n \"\"\"\n try:\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\")\n raise\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n tf_weights[name] = array\n\n # Build TF to PyTorch weights loading map\n tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)\n\n for name, pointer in tf_to_pt_map.items():\n logger.info(\"Importing {}\".format(name))\n if name not in tf_weights:\n logger.info(\"{} not in tf pre-trained weights, skipping\".format(name))\n continue\n array = tf_weights[name]\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if 'kernel' in name and ('ff' in name or 'summary' in name or 'logit' in name):\n logger.info(\"Transposing\")\n array = np.transpose(array)\n if isinstance(pointer, list):\n # Here we will split the TF weigths\n assert len(pointer) == array.shape[0]\n for i, p_i in enumerate(pointer):\n arr_i = array[i, ...]\n try:\n assert p_i.shape == arr_i.shape\n except AssertionError as e:\n e.args += (p_i.shape, arr_i.shape)\n raise\n logger.info(\"Initialize PyTorch weight {} for layer {}\".format(name, i))\n p_i.data = torch.from_numpy(arr_i)\n else:\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n tf_weights.pop(name, None)\n tf_weights.pop(name + '\/Adam', None)\n tf_weights.pop(name + '\/Adam_1', None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model","function_tokens":["def","load_tf_weights_in_xlnet","(","model",",","config",",","tf_path",")",":","try",":","import","numpy","as","np","import","tensorflow","as","tf","except","ImportError",":","logger",".","error","(","\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see \"","\"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\"",")","raise","# Load weights from TF model","init_vars","=","tf",".","train",".","list_variables","(","tf_path",")","tf_weights","=","{","}","for","name",",","shape","in","init_vars",":","logger",".","info","(","\"Loading TF weight {} with shape {}\"",".","format","(","name",",","shape",")",")","array","=","tf",".","train",".","load_variable","(","tf_path",",","name",")","tf_weights","[","name","]","=","array","# Build TF to PyTorch weights loading map","tf_to_pt_map","=","build_tf_xlnet_to_pytorch_map","(","model",",","config",",","tf_weights",")","for","name",",","pointer","in","tf_to_pt_map",".","items","(",")",":","logger",".","info","(","\"Importing {}\"",".","format","(","name",")",")","if","name","not","in","tf_weights",":","logger",".","info","(","\"{} not in tf pre-trained weights, skipping\"",".","format","(","name",")",")","continue","array","=","tf_weights","[","name","]","# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v","# which are not required for using pretrained model","if","'kernel'","in","name","and","(","'ff'","in","name","or","'summary'","in","name","or","'logit'","in","name",")",":","logger",".","info","(","\"Transposing\"",")","array","=","np",".","transpose","(","array",")","if","isinstance","(","pointer",",","list",")",":","# Here we will split the TF weigths","assert","len","(","pointer",")","==","array",".","shape","[","0","]","for","i",",","p_i","in","enumerate","(","pointer",")",":","arr_i","=","array","[","i",",","...","]","try",":","assert","p_i",".","shape","==","arr_i",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","p_i",".","shape",",","arr_i",".","shape",")","raise","logger",".","info","(","\"Initialize PyTorch weight {} for layer {}\"",".","format","(","name",",","i",")",")","p_i",".","data","=","torch",".","from_numpy","(","arr_i",")","else",":","try",":","assert","pointer",".","shape","==","array",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","pointer",".","shape",",","array",".","shape",")","raise","logger",".","info","(","\"Initialize PyTorch weight {}\"",".","format","(","name",")",")","pointer",".","data","=","torch",".","from_numpy","(","array",")","tf_weights",".","pop","(","name",",","None",")","tf_weights",".","pop","(","name","+","'\/Adam'",",","None",")","tf_weights",".","pop","(","name","+","'\/Adam_1'",",","None",")","logger",".","info","(","\"Weights not copied to PyTorch model: {}\"",".","format","(","', '",".","join","(","tf_weights",".","keys","(",")",")",")",")","return","model"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py#L115-L172"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * cdf","docstring":"Implementation of the gelu activation function.\n XLNet is using OpenAI GPT's gelu (not exactly the same as BERT)\n Also see https:\/\/arxiv.org\/abs\/1606.08415","docstring_summary":"Implementation of the gelu activation function.\n XLNet is using OpenAI GPT's gelu (not exactly the same as BERT)\n Also see 
https:\/\/arxiv.org\/abs\/1606.08415","docstring_tokens":["Implementation","of","the","gelu","activation","function",".","XLNet","is","using","OpenAI","GPT","s","gelu","(","not","exactly","the","same","as","BERT",")","Also","see","https",":","\/\/","arxiv",".","org","\/","abs","\/","1606",".","08415"],"function":"def gelu(x):\n \"\"\" Implementation of the gelu activation function.\n XLNet is using OpenAI GPT's gelu (not exactly the same as BERT)\n Also see https:\/\/arxiv.org\/abs\/1606.08415\n \"\"\"\n cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n return x * cdf","function_tokens":["def","gelu","(","x",")",":","cdf","=","0.5","*","(","1.0","+","torch",".","tanh","(","math",".","sqrt","(","2","\/","math",".","pi",")","*","(","x","+","0.044715","*","torch",".","pow","(","x",",","3",")",")",")",")","return","x","*","cdf"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py#L175-L181"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py","language":"python","identifier":"XLNetRelativeAttention.rel_shift","parameters":"(x, klen=-1)","argument_list":"","return_statement":"return x","docstring":"perform relative shift to form the relative attention score.","docstring_summary":"perform relative shift to form the relative attention score.","docstring_tokens":["perform","relative","shift","to","form","the","relative","attention","score","."],"function":"def rel_shift(x, klen=-1):\n \"\"\"perform relative shift to form the relative attention score.\"\"\"\n x_size = x.shape\n\n x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])\n x = x[1:, ...]\n x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])\n # x = x[:, 0:klen, :, :]\n x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))\n\n return x","function_tokens":["def","rel_shift","(","x",",","klen","=","-","1",")",":","x_size","=","x",".","shape","x","=","x",".","reshape","(","x_size","[","1","]",",","x_size","[","0","]",",","x_size","[","2","]",",","x_size","[","3","]",")","x","=","x","[","1",":",",","...","]","x","=","x",".","reshape","(","x_size","[","0","]",",","x_size","[","1","]","-","1",",","x_size","[","2","]",",","x_size","[","3","]",")","# x = x[:, 0:klen, :, :]","x","=","torch",".","index_select","(","x",",","1",",","torch",".","arange","(","klen",",","device","=","x",".","device",",","dtype","=","torch",".","long",")",")","return","x"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py#L230-L240"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py","language":"python","identifier":"XLNetRelativeAttention.rel_attn_core","parameters":"(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None)","argument_list":"","return_statement":"return attn_vec","docstring":"Core relative positional attention operations.","docstring_summary":"Core relative positional attention operations.","docstring_tokens":["Core","relative","positional","attention","operations","."],"function":"def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, 
attn_mask=None, head_mask=None):\n \"\"\"Core relative positional attention operations.\"\"\"\n\n # content based attention score\n ac = torch.einsum('ibnd,jbnd->bnij', q_head + self.r_w_bias, k_head_h)\n\n # position based attention score\n bd = torch.einsum('ibnd,jbnd->bnij', q_head + self.r_r_bias, k_head_r)\n bd = self.rel_shift_bnij(bd, klen=ac.shape[3])\n\n # segment based attention score\n if seg_mat is None:\n ef = 0\n else:\n ef = torch.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)\n ef = torch.einsum('ijbs,ibns->bnij', seg_mat, ef)\n\n # merge attention scores and perform masking\n attn_score = (ac + bd + ef) * self.scale\n if attn_mask is not None:\n # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask\n if attn_mask.dtype == torch.float16:\n attn_score = attn_score - 65500 * torch.einsum('ijbn->bnij', attn_mask)\n else:\n attn_score = attn_score - 1e30 * torch.einsum('ijbn->bnij', attn_mask)\n\n # attention probability\n attn_prob = F.softmax(attn_score, dim=3)\n attn_prob = self.dropout(attn_prob)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_prob = attn_prob * torch.einsum('ijbn->bnij', head_mask)\n\n # attention output\n attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, v_head_h)\n\n if self.output_attentions:\n return attn_vec, torch.einsum('bnij->ijbn', attn_prob)\n\n return attn_vec","function_tokens":["def","rel_attn_core","(","self",",","q_head",",","k_head_h",",","v_head_h",",","k_head_r",",","seg_mat","=","None",",","attn_mask","=","None",",","head_mask","=","None",")",":","# content based attention score","ac","=","torch",".","einsum","(","'ibnd,jbnd->bnij'",",","q_head","+","self",".","r_w_bias",",","k_head_h",")","# position based attention score","bd","=","torch",".","einsum","(","'ibnd,jbnd->bnij'",",","q_head","+","self",".","r_r_bias",",","k_head_r",")","bd","=","self",".","rel_shift_bnij","(","bd",",","klen","=","ac",".","shape","[","3","]",")","# segment based attention score","if","seg_mat","is","None",":","ef","=","0","else",":","ef","=","torch",".","einsum","(","'ibnd,snd->ibns'",",","q_head","+","self",".","r_s_bias",",","self",".","seg_embed",")","ef","=","torch",".","einsum","(","'ijbs,ibns->bnij'",",","seg_mat",",","ef",")","# merge attention scores and perform masking","attn_score","=","(","ac","+","bd","+","ef",")","*","self",".","scale","if","attn_mask","is","not","None",":","# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask","if","attn_mask",".","dtype","==","torch",".","float16",":","attn_score","=","attn_score","-","65500","*","torch",".","einsum","(","'ijbn->bnij'",",","attn_mask",")","else",":","attn_score","=","attn_score","-","1e30","*","torch",".","einsum","(","'ijbn->bnij'",",","attn_mask",")","# attention probability","attn_prob","=","F",".","softmax","(","attn_score",",","dim","=","3",")","attn_prob","=","self",".","dropout","(","attn_prob",")","# Mask heads if we want to","if","head_mask","is","not","None",":","attn_prob","=","attn_prob","*","torch",".","einsum","(","'ijbn->bnij'",",","head_mask",")","# attention output","attn_vec","=","torch",".","einsum","(","'bnij,jbnd->ibnd'",",","attn_prob",",","v_head_h",")","if","self",".","output_attentions",":","return","attn_vec",",","torch",".","einsum","(","'bnij->ijbn'",",","attn_prob",")","return","attn_vec"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py#L257-L297"} 
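The `gelu` record above uses the tanh approximation shared by OpenAI GPT and XLNet, 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))), which is close to but not identical to the exact erf-based definition x * Phi(x). A small self-contained check of how the two forms compare (pure Python; the 1e-3 tolerance is an assumption that comfortably holds at these sample points):

    import math

    def gelu_tanh(x):
        # Tanh approximation, as in the XLNet/GPT gelu captured above.
        return 0.5 * x * (1.0 + math.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)))

    def gelu_exact(x):
        # Exact definition: x * Phi(x), with Phi the standard normal CDF.
        return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

    for v in (-3.0, -1.0, 0.0, 0.5, 2.0):
        assert abs(gelu_tanh(v) - gelu_exact(v)) < 1e-3

The `rel_shift` and `rel_attn_core` records preceding it are where XLNet departs from this shared plumbing, combining content-based (`ac`), position-based (`bd`), and optional segment-based (`ef`) scores before the softmax.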
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py","language":"python","identifier":"XLNetRelativeAttention.post_attention","parameters":"(self, h, attn_vec, residual=True)","argument_list":"","return_statement":"return output","docstring":"Post-attention processing.","docstring_summary":"Post-attention processing.","docstring_tokens":["Post","-","attention","processing","."],"function":"def post_attention(self, h, attn_vec, residual=True):\n \"\"\"Post-attention processing.\"\"\"\n # post-attention projection (back to `d_model`)\n attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)\n\n attn_out = self.dropout(attn_out)\n if residual:\n attn_out = attn_out + h\n output = self.layer_norm(attn_out)\n\n return output","function_tokens":["def","post_attention","(","self",",","h",",","attn_vec",",","residual","=","True",")",":","# post-attention projection (back to `d_model`)","attn_out","=","torch",".","einsum","(","'ibnd,hnd->ibh'",",","attn_vec",",","self",".","o",")","attn_out","=","self",".","dropout","(","attn_out",")","if","residual",":","attn_out","=","attn_out","+","h","output","=","self",".","layer_norm","(","attn_out",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py#L299-L309"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py","language":"python","identifier":"XLNetPreTrainedModel._init_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, XLNetLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, XLNetRelativeAttention):\n for param in [module.q, module.k, module.v, module.o, module.r,\n module.r_r_bias, module.r_s_bias, module.r_w_bias,\n module.seg_embed]:\n param.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, XLNetModel):\n module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)","function_tokens":["def","_init_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf 
https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","nn",".","Linear",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")","elif","isinstance","(","module",",","XLNetLayerNorm",")",":","module",".","bias",".","data",".","zero_","(",")","module",".","weight",".","data",".","fill_","(","1.0",")","elif","isinstance","(","module",",","XLNetRelativeAttention",")",":","for","param","in","[","module",".","q",",","module",".","k",",","module",".","v",",","module",".","o",",","module",".","r",",","module",".","r_r_bias",",","module",".","r_s_bias",",","module",".","r_w_bias",",","module",".","seg_embed","]",":","param",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","elif","isinstance","(","module",",","XLNetModel",")",":","module",".","mask_emb",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlnet.py#L459-L477"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_transfo_xl.py","language":"python","identifier":"TransfoXLConfig.__init__","parameters":"(self,\n vocab_size_or_config_json_file=267735,\n cutoffs=[20000, 40000, 200000],\n d_model=1024,\n d_embed=1024,\n n_head=16,\n d_head=64,\n d_inner=4096,\n div_val=4,\n pre_lnorm=False,\n n_layer=18,\n tgt_len=128,\n ext_len=0,\n mem_len=1600,\n clamp_len=1000,\n same_length=True,\n proj_share_all_but_first=True,\n attn_type=0,\n sample_softmax=-1,\n adaptive=True,\n tie_weight=True,\n dropout=0.1,\n dropatt=0.0,\n untie_r=True,\n init=\"normal\",\n init_range=0.01,\n proj_init_std=0.01,\n init_std=0.02,\n layer_norm_epsilon=1e-5,\n **kwargs)","argument_list":"","return_statement":"","docstring":"Constructs TransfoXLConfig.","docstring_summary":"Constructs TransfoXLConfig.","docstring_tokens":["Constructs","TransfoXLConfig","."],"function":"def __init__(self,\n vocab_size_or_config_json_file=267735,\n cutoffs=[20000, 40000, 200000],\n d_model=1024,\n d_embed=1024,\n n_head=16,\n d_head=64,\n d_inner=4096,\n div_val=4,\n pre_lnorm=False,\n n_layer=18,\n tgt_len=128,\n ext_len=0,\n mem_len=1600,\n clamp_len=1000,\n same_length=True,\n proj_share_all_but_first=True,\n attn_type=0,\n sample_softmax=-1,\n adaptive=True,\n tie_weight=True,\n dropout=0.1,\n dropatt=0.0,\n untie_r=True,\n init=\"normal\",\n init_range=0.01,\n proj_init_std=0.01,\n init_std=0.02,\n layer_norm_epsilon=1e-5,\n **kwargs):\n \"\"\"Constructs TransfoXLConfig.\n \"\"\"\n super(TransfoXLConfig, self).__init__(**kwargs)\n self.n_token = vocab_size_or_config_json_file if isinstance(vocab_size_or_config_json_file, int) else -1\n self.cutoffs = []\n self.cutoffs.extend(cutoffs)\n self.tie_weight = tie_weight\n if proj_share_all_but_first:\n self.tie_projs = [False] + [True] * len(self.cutoffs)\n else:\n self.tie_projs = [False] + [False] * len(self.cutoffs)\n self.d_model = d_model\n self.d_embed = d_embed\n self.d_head = d_head\n self.d_inner = d_inner\n self.div_val = div_val\n self.pre_lnorm = pre_lnorm\n self.n_layer = n_layer\n self.n_head = n_head\n self.tgt_len = tgt_len\n 
self.ext_len = ext_len\n self.mem_len = mem_len\n self.same_length = same_length\n self.attn_type = attn_type\n self.clamp_len = clamp_len\n self.sample_softmax = sample_softmax\n self.adaptive = adaptive\n self.dropout = dropout\n self.dropatt = dropatt\n self.untie_r = untie_r\n self.init = init\n self.init_range = init_range\n self.proj_init_std = proj_init_std\n self.init_std = init_std\n self.layer_norm_epsilon = layer_norm_epsilon\n\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif not isinstance(vocab_size_or_config_json_file, int):\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \" or the path to a pretrained model config file (str)\")","function_tokens":["def","__init__","(","self",",","vocab_size_or_config_json_file","=","267735",",","cutoffs","=","[","20000",",","40000",",","200000","]",",","d_model","=","1024",",","d_embed","=","1024",",","n_head","=","16",",","d_head","=","64",",","d_inner","=","4096",",","div_val","=","4",",","pre_lnorm","=","False",",","n_layer","=","18",",","tgt_len","=","128",",","ext_len","=","0",",","mem_len","=","1600",",","clamp_len","=","1000",",","same_length","=","True",",","proj_share_all_but_first","=","True",",","attn_type","=","0",",","sample_softmax","=","-","1",",","adaptive","=","True",",","tie_weight","=","True",",","dropout","=","0.1",",","dropatt","=","0.0",",","untie_r","=","True",",","init","=","\"normal\"",",","init_range","=","0.01",",","proj_init_std","=","0.01",",","init_std","=","0.02",",","layer_norm_epsilon","=","1e-5",",","*","*","kwargs",")",":","super","(","TransfoXLConfig",",","self",")",".","__init__","(","*","*","kwargs",")","self",".","n_token","=","vocab_size_or_config_json_file","if","isinstance","(","vocab_size_or_config_json_file",",","int",")","else","-","1","self",".","cutoffs","=","[","]","self",".","cutoffs",".","extend","(","cutoffs",")","self",".","tie_weight","=","tie_weight","if","proj_share_all_but_first",":","self",".","tie_projs","=","[","False","]","+","[","True","]","*","len","(","self",".","cutoffs",")","else",":","self",".","tie_projs","=","[","False","]","+","[","False","]","*","len","(","self",".","cutoffs",")","self",".","d_model","=","d_model","self",".","d_embed","=","d_embed","self",".","d_head","=","d_head","self",".","d_inner","=","d_inner","self",".","div_val","=","div_val","self",".","pre_lnorm","=","pre_lnorm","self",".","n_layer","=","n_layer","self",".","n_head","=","n_head","self",".","tgt_len","=","tgt_len","self",".","ext_len","=","ext_len","self",".","mem_len","=","mem_len","self",".","same_length","=","same_length","self",".","attn_type","=","attn_type","self",".","clamp_len","=","clamp_len","self",".","sample_softmax","=","sample_softmax","self",".","adaptive","=","adaptive","self",".","dropout","=","dropout","self",".","dropatt","=","dropatt","self",".","untie_r","=","untie_r","self",".","init","=","init","self",".","init_range","=","init_range","self",".","proj_init_std","=","proj_init_std","self",".","init_std","=","init_std","self",".","layer_norm_epsilon","=","layer_norm_epsilon","if","isinstance","(","vocab_size_or_config_json_file",",","str",")","or","(","sys",".","version_info","[","0","]","==","2","and","isinstance","(","vocab_size_or_config_json_file",",","unico
de",")",")",":","with","open","(","vocab_size_or_config_json_file",",","\"r\"",",","encoding","=","'utf-8'",")","as","reader",":","json_config","=","json",".","loads","(","reader",".","read","(",")",")","for","key",",","value","in","json_config",".","items","(",")",":","self",".","__dict__","[","key","]","=","value","elif","not","isinstance","(","vocab_size_or_config_json_file",",","int",")",":","raise","ValueError","(","\"First argument must be either a vocabulary size (int)\"","\" or the path to a pretrained model config file (str)\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_transfo_xl.py#L70-L144"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_auto.py","language":"python","identifier":"AutoModel.from_pretrained","parameters":"(cls, pretrained_model_name_or_path, *model_args, **kwargs)","argument_list":"","return_statement":"","docstring":"r\"\"\" Instantiates one of the base model classes of the library\n from a pre-trained model configuration.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertModel (DistilBERT model)\n - contains `roberta`: RobertaModel (RoBERTa model)\n - contains `bert`: BertModel (Bert model)\n - contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model)\n - contains `gpt2`: GPT2Model (OpenAI GPT-2 model)\n - contains `ctrl`: CTRLModel (Salesforce CTRL model)\n - contains `transfo-xl`: TransfoXLModel (Transformer-XL model)\n - contains `xlnet`: XLNetModel (XLNet model)\n - contains `xlm`: XLMModel (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `.\/tf_model\/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModel.from_pretrained('.\/test\/bert_model\/') # E.g. 
model was saved using `save_pretrained('.\/test\/saved_model\/')`\n model = AutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('.\/tf_model\/bert_tf_model_config.json')\n model = AutoModel.from_pretrained('.\/tf_model\/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)","docstring_summary":"r\"\"\" Instantiates one of the base model classes of the library\n from a pre-trained model configuration.","docstring_tokens":["r","Instantiates","one","of","the","base","model","classes","of","the","library","from","a","pre","-","trained","model","configuration","."],"function":"def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the base model classes of the library\n from a pre-trained model configuration.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertModel (DistilBERT model)\n - contains `roberta`: RobertaModel (RoBERTa model)\n - contains `bert`: BertModel (Bert model)\n - contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model)\n - contains `gpt2`: GPT2Model (OpenAI GPT-2 model)\n - contains `ctrl`: CTRLModel (Salesforce CTRL model)\n - contains `transfo-xl`: TransfoXLModel (Transformer-XL model)\n - contains `xlnet`: XLNetModel (XLNet model)\n - contains `xlm`: XLMModel (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `.\/tf_model\/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModel.from_pretrained('.\/test\/bert_model\/') # E.g. 
model was saved using `save_pretrained('.\/test\/saved_model\/')`\n model = AutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('.\/tf_model\/bert_tf_model_config.json')\n model = AutoModel.from_pretrained('.\/tf_model\/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n if 'distilbert' in pretrained_model_name_or_path:\n return DistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'roberta' in pretrained_model_name_or_path:\n return RobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'openai-gpt' in pretrained_model_name_or_path:\n return OpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'gpt2' in pretrained_model_name_or_path:\n return GPT2Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'transfo-xl' in pretrained_model_name_or_path:\n return TransfoXLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'ctrl' in pretrained_model_name_or_path:\n return CTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n raise ValueError(\"Unrecognized model identifier in {}. 
Should contains one of \"\n \"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"\n \"'xlm', 'roberta, 'ctrl'\".format(pretrained_model_name_or_path))","function_tokens":["def","from_pretrained","(","cls",",","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")",":","if","'distilbert'","in","pretrained_model_name_or_path",":","return","DistilBertModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'roberta'","in","pretrained_model_name_or_path",":","return","RobertaModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'bert'","in","pretrained_model_name_or_path",":","return","BertModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'openai-gpt'","in","pretrained_model_name_or_path",":","return","OpenAIGPTModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'gpt2'","in","pretrained_model_name_or_path",":","return","GPT2Model",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'transfo-xl'","in","pretrained_model_name_or_path",":","return","TransfoXLModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'xlnet'","in","pretrained_model_name_or_path",":","return","XLNetModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'xlm'","in","pretrained_model_name_or_path",":","return","XLMModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'ctrl'","in","pretrained_model_name_or_path",":","return","CTRLModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","raise","ValueError","(","\"Unrecognized model identifier in {}. 
Should contains one of \"","\"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"","\"'xlm', 'roberta, 'ctrl'\"",".","format","(","pretrained_model_name_or_path",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_auto.py#L67-L159"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_auto.py","language":"python","identifier":"AutoModelWithLMHead.from_pretrained","parameters":"(cls, pretrained_model_name_or_path, *model_args, **kwargs)","argument_list":"","return_statement":"","docstring":"r\"\"\" Instantiates one of the language modeling model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForMaskedLM (DistilBERT model)\n - contains `roberta`: RobertaForMaskedLM (RoBERTa model)\n - contains `bert`: BertForMaskedLM (Bert model)\n - contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model)\n - contains `gpt2`: GPT2LMHeadModel (OpenAI GPT-2 model)\n - contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model)\n - contains `xlnet`: XLNetLMHeadModel (XLNet model)\n - contains `xlm`: XLMWithLMHeadModel (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `.\/tf_model\/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModelWithLMHead.from_pretrained('.\/test\/bert_model\/') # E.g. 
model was saved using `save_pretrained('.\/test\/saved_model\/')`\n model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('.\/tf_model\/bert_tf_model_config.json')\n model = AutoModelWithLMHead.from_pretrained('.\/tf_model\/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)","docstring_summary":"r\"\"\" Instantiates one of the language modeling model classes of the library\n from a pre-trained model configuration.","docstring_tokens":["r","Instantiates","one","of","the","language","modeling","model","classes","of","the","library","from","a","pre","-","trained","model","configuration","."],"function":"def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the language modeling model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForMaskedLM (DistilBERT model)\n - contains `roberta`: RobertaForMaskedLM (RoBERTa model)\n - contains `bert`: BertForMaskedLM (Bert model)\n - contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model)\n - contains `gpt2`: GPT2LMHeadModel (OpenAI GPT-2 model)\n - contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model)\n - contains `xlnet`: XLNetLMHeadModel (XLNet model)\n - contains `xlm`: XLMWithLMHeadModel (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `.\/tf_model\/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModelWithLMHead.from_pretrained('.\/test\/bert_model\/') # E.g. 
model was saved using `save_pretrained('.\/test\/saved_model\/')`\n model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('.\/tf_model\/bert_tf_model_config.json')\n model = AutoModelWithLMHead.from_pretrained('.\/tf_model\/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n if 'distilbert' in pretrained_model_name_or_path:\n return DistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'roberta' in pretrained_model_name_or_path:\n return RobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'openai-gpt' in pretrained_model_name_or_path:\n return OpenAIGPTLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'gpt2' in pretrained_model_name_or_path:\n return GPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'transfo-xl' in pretrained_model_name_or_path:\n return TransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMWithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'ctrl' in pretrained_model_name_or_path:\n return CTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n raise ValueError(\"Unrecognized model identifier in {}. 
Should contains one of \"\n \"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"\n \"'xlm', 'roberta','ctrl'\".format(pretrained_model_name_or_path))","function_tokens":["def","from_pretrained","(","cls",",","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")",":","if","'distilbert'","in","pretrained_model_name_or_path",":","return","DistilBertForMaskedLM",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'roberta'","in","pretrained_model_name_or_path",":","return","RobertaForMaskedLM",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'bert'","in","pretrained_model_name_or_path",":","return","BertForMaskedLM",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'openai-gpt'","in","pretrained_model_name_or_path",":","return","OpenAIGPTLMHeadModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'gpt2'","in","pretrained_model_name_or_path",":","return","GPT2LMHeadModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'transfo-xl'","in","pretrained_model_name_or_path",":","return","TransfoXLLMHeadModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'xlnet'","in","pretrained_model_name_or_path",":","return","XLNetLMHeadModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'xlm'","in","pretrained_model_name_or_path",":","return","XLMWithLMHeadModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'ctrl'","in","pretrained_model_name_or_path",":","return","CTRLLMHeadModel",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","raise","ValueError","(","\"Unrecognized model identifier in {}. 
Should contains one of \"","\"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"","\"'xlm', 'roberta','ctrl'\"",".","format","(","pretrained_model_name_or_path",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_auto.py#L191-L285"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_auto.py","language":"python","identifier":"AutoModelForSequenceClassification.from_pretrained","parameters":"(cls, pretrained_model_name_or_path, *model_args, **kwargs)","argument_list":"","return_statement":"","docstring":"r\"\"\" Instantiates one of the sequence classification model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model)\n - contains `roberta`: RobertaForSequenceClassification (RoBERTa model)\n - contains `bert`: BertForSequenceClassification (Bert model)\n - contains `xlnet`: XLNetForSequenceClassification (XLNet model)\n - contains `xlm`: XLMForSequenceClassification (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `.\/tf_model\/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModelForSequenceClassification.from_pretrained('.\/test\/bert_model\/') # E.g. 
model was saved using `save_pretrained('.\/test\/saved_model\/')`\n model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('.\/tf_model\/bert_tf_model_config.json')\n model = AutoModelForSequenceClassification.from_pretrained('.\/tf_model\/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)","docstring_summary":"r\"\"\" Instantiates one of the sequence classification model classes of the library\n from a pre-trained model configuration.","docstring_tokens":["r","Instantiates","one","of","the","sequence","classification","model","classes","of","the","library","from","a","pre","-","trained","model","configuration","."],"function":"def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the sequence classification model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model)\n - contains `roberta`: RobertaForSequenceClassification (RoBERTa model)\n - contains `bert`: BertForSequenceClassification (Bert model)\n - contains `xlnet`: XLNetForSequenceClassification (XLNet model)\n - contains `xlm`: XLMForSequenceClassification (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `.\/tf_model\/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModelForSequenceClassification.from_pretrained('.\/test\/bert_model\/') # E.g. 
model was saved using `save_pretrained('.\/test\/saved_model\/')`\n model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('.\/tf_model\/bert_tf_model_config.json')\n model = AutoModelForSequenceClassification.from_pretrained('.\/tf_model\/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n if 'distilbert' in pretrained_model_name_or_path:\n return DistilBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'roberta' in pretrained_model_name_or_path:\n return RobertaForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n\n raise ValueError(\"Unrecognized model identifier in {}. Should contains one of \"\n \"'bert', 'xlnet', 'xlm', 'roberta'\".format(pretrained_model_name_or_path))","function_tokens":["def","from_pretrained","(","cls",",","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")",":","if","'distilbert'","in","pretrained_model_name_or_path",":","return","DistilBertForSequenceClassification",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'roberta'","in","pretrained_model_name_or_path",":","return","RobertaForSequenceClassification",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'bert'","in","pretrained_model_name_or_path",":","return","BertForSequenceClassification",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'xlnet'","in","pretrained_model_name_or_path",":","return","XLNetForSequenceClassification",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'xlm'","in","pretrained_model_name_or_path",":","return","XLMForSequenceClassification",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","raise","ValueError","(","\"Unrecognized model identifier in {}. 
Should contains one of \"","\"'bert', 'xlnet', 'xlm', 'roberta'\"",".","format","(","pretrained_model_name_or_path",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_auto.py#L313-L396"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_auto.py","language":"python","identifier":"AutoModelForQuestionAnswering.from_pretrained","parameters":"(cls, pretrained_model_name_or_path, *model_args, **kwargs)","argument_list":"","return_statement":"","docstring":"r\"\"\" Instantiates one of the question answering model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model)\n - contains `bert`: BertForQuestionAnswering (Bert model)\n - contains `xlnet`: XLNetForQuestionAnswering (XLNet model)\n - contains `xlm`: XLMForQuestionAnswering (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `.\/tf_model\/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.\n - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force a (re-)download of the model weights and configuration files and override the cached versions if they exist.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g. ``output_attention=True``). They behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModelForQuestionAnswering.from_pretrained('.\/test\/bert_model\/') # E.g. 
model was saved using `save_pretrained('.\/test\/saved_model\/')`\n model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('.\/tf_model\/bert_tf_model_config.json')\n model = AutoModelForQuestionAnswering.from_pretrained('.\/tf_model\/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)","docstring_summary":"r\"\"\" Instantiates one of the question answering model classes of the library\n from a pre-trained model configuration.","docstring_tokens":["r","Instantiates","one","of","the","question","answering","model","classes","of","the","library","from","a","pre","-","trained","model","configuration","."],"function":"def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the question answering model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model)\n - contains `bert`: BertForQuestionAnswering (Bert model)\n - contains `xlnet`: XLNetForQuestionAnswering (XLNet model)\n - contains `xlm`: XLMForQuestionAnswering (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `.\/tf_model\/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.\n - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force a (re-)download of the model weights and configuration files and override the cached versions if they exist.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g. ``output_attention=True``). They behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModelForQuestionAnswering.from_pretrained('.\/test\/bert_model\/') # E.g. 
model was saved using `save_pretrained('.\/test\/saved_model\/')`\n model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('.\/tf_model\/bert_tf_model_config.json')\n model = AutoModelForQuestionAnswering.from_pretrained('.\/tf_model\/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n if 'distilbert' in pretrained_model_name_or_path:\n return DistilBertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n\n raise ValueError(\"Unrecognized model identifier in {}. Should contains one of \"\n \"'bert', 'xlnet', 'xlm'\".format(pretrained_model_name_or_path))","function_tokens":["def","from_pretrained","(","cls",",","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")",":","if","'distilbert'","in","pretrained_model_name_or_path",":","return","DistilBertForQuestionAnswering",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'bert'","in","pretrained_model_name_or_path",":","return","BertForQuestionAnswering",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'xlnet'","in","pretrained_model_name_or_path",":","return","XLNetForQuestionAnswering",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","elif","'xlm'","in","pretrained_model_name_or_path",":","return","XLMForQuestionAnswering",".","from_pretrained","(","pretrained_model_name_or_path",",","*","model_args",",","*","*","kwargs",")","raise","ValueError","(","\"Unrecognized model identifier in {}. 
Should contains one of \"","\"'bert', 'xlnet', 'xlm'\"",".","format","(","pretrained_model_name_or_path",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_auto.py#L423-L503"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return 0.5 * x * (1.0 + torch.erf(x \/ math.sqrt(2.0)))","docstring":"GELU activation\n https:\/\/arxiv.org\/abs\/1606.08415\n https:\/\/github.com\/huggingface\/pytorch-openai-transformer-lm\/blob\/master\/model_pytorch.py#L14\n https:\/\/github.com\/huggingface\/transformers\/blob\/master\/modeling.py","docstring_summary":"GELU activation\n https:\/\/arxiv.org\/abs\/1606.08415\n https:\/\/github.com\/huggingface\/pytorch-openai-transformer-lm\/blob\/master\/model_pytorch.py#L14\n https:\/\/github.com\/huggingface\/transformers\/blob\/master\/modeling.py","docstring_tokens":["GELU","activation","https",":","\/\/","arxiv",".","org","\/","abs","\/","1606",".","08415","https",":","\/\/","github",".","com","\/","huggingface","\/","pytorch","-","openai","-","transformer","-","lm","\/","blob","\/","master","\/","model_pytorch",".","py#L14","https",":","\/\/","github",".","com","\/","huggingface","\/","transformers","\/","blob","\/","master","\/","modeling",".","py"],"function":"def gelu(x):\n \"\"\"\n GELU activation\n https:\/\/arxiv.org\/abs\/1606.08415\n https:\/\/github.com\/huggingface\/pytorch-openai-transformer-lm\/blob\/master\/model_pytorch.py#L14\n https:\/\/github.com\/huggingface\/transformers\/blob\/master\/modeling.py\n \"\"\"\n # return 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n return 0.5 * x * (1.0 + torch.erf(x \/ math.sqrt(2.0)))","function_tokens":["def","gelu","(","x",")",":","# return 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))","return","0.5","*","x","*","(","1.0","+","torch",".","erf","(","x","\/","math",".","sqrt","(","2.0",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py#L61-L69"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py","language":"python","identifier":"get_masks","parameters":"(slen, lengths, causal, padding_mask=None)","argument_list":"","return_statement":"return mask, attn_mask","docstring":"Generate hidden states mask, and optionally an attention mask.","docstring_summary":"Generate hidden states mask, and optionally an attention mask.","docstring_tokens":["Generate","hidden","states","mask","and","optionally","an","attention","mask","."],"function":"def get_masks(slen, lengths, causal, padding_mask=None):\n \"\"\"\n Generate hidden states mask, and optionally an attention mask.\n \"\"\"\n bs = lengths.size(0)\n if padding_mask is not None:\n mask = padding_mask\n else:\n assert lengths.max().item() <= slen\n alen = torch.arange(slen, dtype=torch.long, device=lengths.device)\n mask = alen < lengths[:, None]\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n if causal:\n attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= 
alen[None, :, None]\n else:\n attn_mask = mask\n\n # sanity check\n assert mask.size() == (bs, slen)\n assert causal is False or attn_mask.size() == (bs, slen, slen)\n\n return mask, attn_mask","function_tokens":["def","get_masks","(","slen",",","lengths",",","causal",",","padding_mask","=","None",")",":","bs","=","lengths",".","size","(","0",")","if","padding_mask","is","not","None",":","mask","=","padding_mask","else",":","assert","lengths",".","max","(",")",".","item","(",")","<=","slen","alen","=","torch",".","arange","(","slen",",","dtype","=","torch",".","long",",","device","=","lengths",".","device",")","mask","=","alen","<","lengths","[",":",",","None","]","# attention mask is the same as mask, or triangular inferior attention (causal)","if","causal",":","attn_mask","=","alen","[","None",",","None",",",":","]",".","repeat","(","bs",",","slen",",","1",")","<=","alen","[","None",",",":",",","None","]","else",":","attn_mask","=","mask","# sanity check","assert","mask",".","size","(",")","==","(","bs",",","slen",")","assert","causal","is","False","or","attn_mask",".","size","(",")","==","(","bs",",","slen",",","slen",")","return","mask",",","attn_mask"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py#L72-L94"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py","language":"python","identifier":"MultiHeadAttention.forward","parameters":"(self, input, mask, kv=None, cache=None, head_mask=None)","argument_list":"","return_statement":"return outputs","docstring":"Self-attention (if kv is None) or attention over source sentence (provided by kv).","docstring_summary":"Self-attention (if kv is None) or attention over source sentence (provided by kv).","docstring_tokens":["Self","-","attention","(","if","kv","is","None",")","or","attention","over","source","sentence","(","provided","by","kv",")","."],"function":"def forward(self, input, mask, kv=None, cache=None, head_mask=None):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = input.size()\n if kv is None:\n klen = qlen if cache is None else cache['slen'] + qlen\n else:\n klen = kv.size(1)\n # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)\n n_heads = self.n_heads\n dim_per_head = self.dim \/\/ n_heads\n mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)\n\n q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n if kv is None:\n k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n elif cache is None or self.layer_id not in cache:\n k = v = kv\n k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if cache is not None:\n if self.layer_id in cache:\n if kv is None:\n k_, v_ = cache[self.layer_id]\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = 
torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = cache[self.layer_id]\n cache[self.layer_id] = (k, v)\n\n q = q \/ math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)\n scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)\n mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)\n scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)\n\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n outputs = (self.out_lin(context),)\n if self.output_attentions:\n outputs = outputs + (weights,)\n return outputs","function_tokens":["def","forward","(","self",",","input",",","mask",",","kv","=","None",",","cache","=","None",",","head_mask","=","None",")",":","# Input is (bs, qlen, dim)","# Mask is (bs, klen) (non-causal) or (bs, klen, klen)","bs",",","qlen",",","dim","=","input",".","size","(",")","if","kv","is","None",":","klen","=","qlen","if","cache","is","None","else","cache","[","'slen'","]","+","qlen","else",":","klen","=","kv",".","size","(","1",")","# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)","n_heads","=","self",".","n_heads","dim_per_head","=","self",".","dim","\/\/","n_heads","mask_reshape","=","(","bs",",","1",",","qlen",",","klen",")","if","mask",".","dim","(",")","==","3","else","(","bs",",","1",",","1",",","klen",")","def","shape","(","x",")",":","\"\"\" projection \"\"\"","return","x",".","view","(","bs",",","-","1",",","self",".","n_heads",",","dim_per_head",")",".","transpose","(","1",",","2",")","def","unshape","(","x",")",":","\"\"\" compute context \"\"\"","return","x",".","transpose","(","1",",","2",")",".","contiguous","(",")",".","view","(","bs",",","-","1",",","self",".","n_heads","*","dim_per_head",")","q","=","shape","(","self",".","q_lin","(","input",")",")","# (bs, n_heads, qlen, dim_per_head)","if","kv","is","None",":","k","=","shape","(","self",".","k_lin","(","input",")",")","# (bs, n_heads, qlen, dim_per_head)","v","=","shape","(","self",".","v_lin","(","input",")",")","# (bs, n_heads, qlen, dim_per_head)","elif","cache","is","None","or","self",".","layer_id","not","in","cache",":","k","=","v","=","kv","k","=","shape","(","self",".","k_lin","(","k",")",")","# (bs, n_heads, qlen, dim_per_head)","v","=","shape","(","self",".","v_lin","(","v",")",")","# (bs, n_heads, qlen, dim_per_head)","if","cache","is","not","None",":","if","self",".","layer_id","in","cache",":","if","kv","is","None",":","k_",",","v_","=","cache","[","self",".","layer_id","]","k","=","torch",".","cat","(","[","k_",",","k","]",",","dim","=","2",")","# (bs, n_heads, klen, dim_per_head)","v","=","torch",".","cat","(","[","v_",",","v","]",",","dim","=","2",")","# (bs, n_heads, klen, dim_per_head)","else",":","k",",","v","=","cache","[","self",".","layer_id","]","cache","[","self",".","layer_id","]","=","(","k",",","v",")","q","=","q","\/","math",".","sqrt","(","dim_per_head",")","# (bs, n_heads, qlen, dim_per_head)","scores","=","torch",".","matmul","(","q",",","k",".","transpose","(","2",",","3",")",")","# (bs, n_heads, qlen, 
klen)","mask","=","(","mask","==","0",")",".","view","(","mask_reshape",")",".","expand_as","(","scores",")","# (bs, n_heads, qlen, klen)","scores",".","masked_fill_","(","mask",",","-","float","(","'inf'",")",")","# (bs, n_heads, qlen, klen)","weights","=","F",".","softmax","(","scores",".","float","(",")",",","dim","=","-","1",")",".","type_as","(","scores",")","# (bs, n_heads, qlen, klen)","weights","=","F",".","dropout","(","weights",",","p","=","self",".","dropout",",","training","=","self",".","training",")","# (bs, n_heads, qlen, klen)","# Mask heads if we want to","if","head_mask","is","not","None",":","weights","=","weights","*","head_mask","context","=","torch",".","matmul","(","weights",",","v",")","# (bs, n_heads, qlen, dim_per_head)","context","=","unshape","(","context",")","# (bs, qlen, dim)","outputs","=","(","self",".","out_lin","(","context",")",",",")","if","self",".","output_attentions",":","outputs","=","outputs","+","(","weights",",",")","return","outputs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py#L137-L198"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py","language":"python","identifier":"XLMPreTrainedModel._init_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def _init_weights(self, module):\n \"\"\" Initialize the weights. \"\"\"\n if isinstance(module, nn.Embedding):\n if self.config is not None and self.config.embed_init_std is not None:\n nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)\n if isinstance(module, nn.Linear):\n if self.config is not None and self.config.init_std is not None:\n nn.init.normal_(module.weight, mean=0, std=self.config.init_std)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, 0.)\n if isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)","function_tokens":["def","_init_weights","(","self",",","module",")",":","if","isinstance","(","module",",","nn",".","Embedding",")",":","if","self",".","config","is","not","None","and","self",".","config",".","embed_init_std","is","not","None",":","nn",".","init",".","normal_","(","module",".","weight",",","mean","=","0",",","std","=","self",".","config",".","embed_init_std",")","if","isinstance","(","module",",","nn",".","Linear",")",":","if","self",".","config","is","not","None","and","self",".","config",".","init_std","is","not","None",":","nn",".","init",".","normal_","(","module",".","weight",",","mean","=","0",",","std","=","self",".","config",".","init_std",")","if","hasattr","(","module",",","'bias'",")","and","module",".","bias","is","not","None",":","nn",".","init",".","constant_","(","module",".","bias",",","0.",")","if","isinstance","(","module",",","nn",".","LayerNorm",")",":","module",".","bias",".","data",".","zero_","(",")","module",".","weight",".","data",".","fill_","(","1.0",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py#L230-L242"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py","language":"python","identifier":"XLMPredLayer.forward","parameters":"(self, x, y=None)","argument_list":"","return_statement":"return outputs","docstring":"Compute the loss, and optionally the scores.","docstring_summary":"Compute the loss, and optionally the scores.","docstring_tokens":["Compute","the","loss","and","optionally","the","scores","."],"function":"def forward(self, x, y=None):\n \"\"\" Compute the loss, and optionally the scores.\n \"\"\"\n outputs = ()\n if self.asm is False:\n scores = self.proj(x)\n outputs = (scores,) + outputs\n if y is not None:\n loss = F.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction='elementwise_mean')\n outputs = (loss,) + outputs\n else:\n scores = self.proj.log_prob(x)\n outputs = (scores,) + outputs\n if y is not None:\n _, loss = self.proj(x, y)\n outputs = (loss,) + outputs\n\n return outputs","function_tokens":["def","forward","(","self",",","x",",","y","=","None",")",":","outputs","=","(",")","if","self",".","asm","is","False",":","scores","=","self",".","proj","(","x",")","outputs","=","(","scores",",",")","+","outputs","if","y","is","not","None",":","loss","=","F",".","cross_entropy","(","scores",".","view","(","-","1",",","self",".","n_words",")",",","y",".","view","(","-","1",")",",","reduction","=","'elementwise_mean'",")","outputs","=","(","loss",",",")","+","outputs","else",":","scores","=","self",".","proj",".","log_prob","(","x",")","outputs","=","(","scores",",",")","+","outputs","if","y","is","not","None",":","_",",","loss","=","self",".","proj","(","x",",","y",")","outputs","=","(","loss",",",")","+","outputs","return","outputs"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_xlm.py#L561-L578"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_openai.py","language":"python","identifier":"OpenAIGPTConfig.__init__","parameters":"(\n self,\n vocab_size_or_config_json_file=40478,\n n_positions=512,\n n_ctx=512,\n n_embd=768,\n n_layer=12,\n n_head=12,\n afn=\"gelu\",\n resid_pdrop=0.1,\n embd_pdrop=0.1,\n attn_pdrop=0.1,\n layer_norm_epsilon=1e-5,\n initializer_range=0.02,\n predict_special_tokens=True,\n\n num_labels=1,\n summary_type='cls_index',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n **kwargs\n )","argument_list":"","return_statement":"","docstring":"Constructs OpenAIGPTConfig.","docstring_summary":"Constructs OpenAIGPTConfig.","docstring_tokens":["Constructs","OpenAIGPTConfig","."],"function":"def __init__(\n self,\n vocab_size_or_config_json_file=40478,\n n_positions=512,\n n_ctx=512,\n n_embd=768,\n n_layer=12,\n n_head=12,\n afn=\"gelu\",\n resid_pdrop=0.1,\n embd_pdrop=0.1,\n attn_pdrop=0.1,\n layer_norm_epsilon=1e-5,\n initializer_range=0.02,\n predict_special_tokens=True,\n\n num_labels=1,\n summary_type='cls_index',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n **kwargs\n ):\n \"\"\"Constructs OpenAIGPTConfig.\n \"\"\"\n super(OpenAIGPTConfig, self).__init__(**kwargs)\n\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and 
isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding=\"utf-8\") as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.n_ctx = n_ctx\n self.n_positions = n_positions\n self.n_embd = n_embd\n self.n_layer = n_layer\n self.n_head = n_head\n self.afn = afn\n self.resid_pdrop = resid_pdrop\n self.embd_pdrop = embd_pdrop\n self.attn_pdrop = attn_pdrop\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n self.predict_special_tokens = predict_special_tokens\n\n self.num_labels = num_labels\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_first_dropout = summary_first_dropout\n self.summary_proj_to_labels = summary_proj_to_labels\n else:\n raise ValueError(\n \"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\"\n )","function_tokens":["def","__init__","(","self",",","vocab_size_or_config_json_file","=","40478",",","n_positions","=","512",",","n_ctx","=","512",",","n_embd","=","768",",","n_layer","=","12",",","n_head","=","12",",","afn","=","\"gelu\"",",","resid_pdrop","=","0.1",",","embd_pdrop","=","0.1",",","attn_pdrop","=","0.1",",","layer_norm_epsilon","=","1e-5",",","initializer_range","=","0.02",",","predict_special_tokens","=","True",",","num_labels","=","1",",","summary_type","=","'cls_index'",",","summary_use_proj","=","True",",","summary_activation","=","None",",","summary_proj_to_labels","=","True",",","summary_first_dropout","=","0.1",",","*","*","kwargs",")",":","super","(","OpenAIGPTConfig",",","self",")",".","__init__","(","*","*","kwargs",")","if","isinstance","(","vocab_size_or_config_json_file",",","str",")","or","(","sys",".","version_info","[","0","]","==","2","and","isinstance","(","vocab_size_or_config_json_file",",","unicode",")",")",":","with","open","(","vocab_size_or_config_json_file",",","\"r\"",",","encoding","=","\"utf-8\"",")","as","reader",":","json_config","=","json",".","loads","(","reader",".","read","(",")",")","for","key",",","value","in","json_config",".","items","(",")",":","self",".","__dict__","[","key","]","=","value","elif","isinstance","(","vocab_size_or_config_json_file",",","int",")",":","self",".","vocab_size","=","vocab_size_or_config_json_file","self",".","n_ctx","=","n_ctx","self",".","n_positions","=","n_positions","self",".","n_embd","=","n_embd","self",".","n_layer","=","n_layer","self",".","n_head","=","n_head","self",".","afn","=","afn","self",".","resid_pdrop","=","resid_pdrop","self",".","embd_pdrop","=","embd_pdrop","self",".","attn_pdrop","=","attn_pdrop","self",".","layer_norm_epsilon","=","layer_norm_epsilon","self",".","initializer_range","=","initializer_range","self",".","predict_special_tokens","=","predict_special_tokens","self",".","num_labels","=","num_labels","self",".","summary_type","=","summary_type","self",".","summary_use_proj","=","summary_use_proj","self",".","summary_activation","=","summary_activation","self",".","summary_first_dropout","=","summary_first_dropout","self",".","summary_proj_to_labels","=","summary_proj_to_labels","else",":","raise","ValueError","(","\"First argument must be either a vocabulary size (int)\"","\"or the path to a pretrained model config file 
(str)\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_openai.py#L59-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_bert.py","language":"python","identifier":"load_tf_weights_in_bert","parameters":"(model, config, tf_checkpoint_path)","argument_list":"","return_statement":"return model","docstring":"Load tf checkpoints in a pytorch model.","docstring_summary":"Load tf checkpoints in a pytorch model.","docstring_tokens":["Load","tf","checkpoints","in","a","pytorch","model","."],"function":"def load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('\/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n logger.info(\"Skipping {}\".format(\"\/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'squad':\n pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, l[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"\/\".join(name)))\n continue\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model","function_tokens":["def","load_tf_weights_in_bert","(","model",",","config",",","tf_checkpoint_path",")",":","try",":","import","re","import","numpy","as","np","import","tensorflow","as","tf","except","ImportError",":","logger",".","error","(","\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"","\"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\"",")","raise","tf_path","=","os",".","path",".","abspath","(","tf_checkpoint_path",")","logger",".","info","(","\"Converting TensorFlow checkpoint from {}\"",".","format","(","tf_path",")",")","# Load weights from TF model","init_vars","=","tf",".","train",".","list_variables","(","tf_path",")","names","=","[","]","arrays","=","[","]","for","name",",","shape","in","init_vars",":","logger",".","info","(","\"Loading TF weight {} with shape {}\"",".","format","(","name",",","shape",")",")","array","=","tf",".","train",".","load_variable","(","tf_path",",","name",")","names",".","append","(","name",")","arrays",".","append","(","array",")","for","name",",","array","in","zip","(","names",",","arrays",")",":","name","=","name",".","split","(","'\/'",")","# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v","# which are not required for using pretrained model","if","any","(","n","in","[","\"adam_v\"",",","\"adam_m\"",",","\"global_step\"","]","for","n","in","name",")",":","logger",".","info","(","\"Skipping {}\"",".","format","(","\"\/\"",".","join","(","name",")",")",")","continue","pointer","=","model","for","m_name","in","name",":","if","re",".","fullmatch","(","r'[A-Za-z]+_\\d+'",",","m_name",")",":","l","=","re",".","split","(","r'_(\\d+)'",",","m_name",")","else",":","l","=","[","m_name","]","if","l","[","0","]","==","'kernel'","or","l","[","0","]","==","'gamma'",":","pointer","=","getattr","(","pointer",",","'weight'",")","elif","l","[","0","]","==","'output_bias'","or","l","[","0","]","==","'beta'",":","pointer","=","getattr","(","pointer",",","'bias'",")","elif","l","[","0","]","==","'output_weights'",":","pointer","=","getattr","(","pointer",",","'weight'",")","elif","l","[","0","]","==","'squad'",":","pointer","=","getattr","(","pointer",",","'classifier'",")","else",":","try",":","pointer","=","getattr","(","pointer",",","l","[","0","]",")","except","AttributeError",":","logger",".","info","(","\"Skipping {}\"",".","format","(","\"\/\"",".","join","(","name",")",")",")","continue","if","len","(","l",")",">=","2",":","num","=","int","(","l","[","1","]",")","pointer","=","pointer","[","num","]","if","m_name","[","-","11",":","]","==","'_embeddings'",":","pointer","=","getattr","(","pointer",",","'weight'",")","elif","m_name","==","'kernel'",":","array","=","np",".","transpose","(","array",")","try",":","assert","pointer",".","shape","==","array",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","pointer",".","shape",",","array",".","shape",")","raise","logger",".","info","(","\"Initialize PyTorch weight {}\"",".","format","(","name",")",")","pointer",".","data","=","torch",".","from_numpy","(","array",")","return","model"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_bert.py#L55-L119"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_bert.py","language":"python","identifier":"gelu","parameters":"(x)","argument_list":"","return_statement":"return x * 0.5 * (1.0 + torch.erf(x \/ math.sqrt(2.0)))","docstring":"Original Implementation of the gelu activation function in Google Bert repo when initially created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x 
* (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https:\/\/arxiv.org\/abs\/1606.08415","docstring_summary":"Original Implementation of the gelu activation function in Google Bert repo when initially created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https:\/\/arxiv.org\/abs\/1606.08415","docstring_tokens":["Original","Implementation","of","the","gelu","activation","function","in","Google","Bert","repo","when","initially","created",".","For","information",":","OpenAI","GPT","s","gelu","is","slightly","different","(","and","gives","slightly","different","results",")",":","0",".","5","*","x","*","(","1","+","torch",".","tanh","(","math",".","sqrt","(","2","\/","math",".","pi",")","*","(","x","+","0",".","044715","*","torch",".","pow","(","x","3","))))","Also","see","https",":","\/\/","arxiv",".","org","\/","abs","\/","1606",".","08415"],"function":"def gelu(x):\n \"\"\" Original Implementation of the gelu activation function in Google Bert repo when initially created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https:\/\/arxiv.org\/abs\/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x \/ math.sqrt(2.0)))","function_tokens":["def","gelu","(","x",")",":","return","x","*","0.5","*","(","1.0","+","torch",".","erf","(","x","\/","math",".","sqrt","(","2.0",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_bert.py#L122-L128"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_bert.py","language":"python","identifier":"gelu_new","parameters":"(x)","argument_list":"","return_statement":"return 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))","docstring":"Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).\n Also see https:\/\/arxiv.org\/abs\/1606.08415","docstring_summary":"Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).\n Also see https:\/\/arxiv.org\/abs\/1606.08415","docstring_tokens":["Implementation","of","the","gelu","activation","function","currently","in","Google","Bert","repo","(","identical","to","OpenAI","GPT",")",".","Also","see","https",":","\/\/","arxiv",".","org","\/","abs","\/","1606",".","08415"],"function":"def gelu_new(x):\n \"\"\" Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).\n Also see https:\/\/arxiv.org\/abs\/1606.08415\n \"\"\"\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 \/ math.pi) * (x + 0.044715 * torch.pow(x, 3))))","function_tokens":["def","gelu_new","(","x",")",":","return","0.5","*","x","*","(","1","+","torch",".","tanh","(","math",".","sqrt","(","2","\/","math",".","pi",")","*","(","x","+","0.044715","*","torch",".","pow","(","x",",","3",")",")",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_bert.py#L130-L134"} 
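The two GELU records above define the exact erf form (`gelu`) and the tanh approximation (`gelu_new`). The following comparison script is illustrative only (not from the repo); it reuses the two definitions verbatim to show how closely they agree:

```python
import math
import torch

# Exact GELU (erf form) and the tanh approximation, as defined above.
def gelu(x):
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_new(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))

x = torch.linspace(-4.0, 4.0, steps=9)
print(gelu(x))
print((gelu(x) - gelu_new(x)).abs().max())  # small (~1e-3 or less): near-identical
```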
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_bert.py","language":"python","identifier":"BertPreTrainedModel._init_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights","docstring_summary":"Initialize the weights","docstring_tokens":["Initialize","the","weights"],"function":"def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()","function_tokens":["def","_init_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","elif","isinstance","(","module",",","BertLayerNorm",")",":","module",".","bias",".","data",".","zero_","(",")","module",".","weight",".","data",".","fill_","(","1.0",")","if","isinstance","(","module",",","nn",".","Linear",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_bert.py#L458-L468"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_auto.py","language":"python","identifier":"AutoConfig.from_pretrained","parameters":"(cls, pretrained_model_name_or_path, **kwargs)","argument_list":"","return_statement":"","docstring":"r\"\"\" Instantiate a one of the configuration classes of the library\n from a pre-trained model configuration.\n\n The configuration class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertConfig (DistilBERT model)\n - contains `bert`: BertConfig (Bert model)\n - contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model)\n - contains `gpt2`: GPT2Config (OpenAI GPT-2 model)\n - contains `transfo-xl`: TransfoXLConfig (Transformer-XL model)\n - contains `xlnet`: XLNetConfig (XLNet model)\n - contains `xlm`: XLMConfig (XLM model)\n - contains `roberta`: RobertaConfig (RoBERTa model)\n - contains `ctrl` : CTRLConfig (CTRL model)\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``.\/my_model_directory\/``.\n - a path or url to a saved configuration JSON `file`, e.g.: ``.\/my_model_directory\/configuration.json``.\n\n cache_dir: (`optional`) 
string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n kwargs: (`optional`) dict: key\/value pairs with which to update the configuration object after loading.\n\n - The values in kwargs of any keys which are configuration attributes will be used to override the loaded values.\n - Behavior concerning key\/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n return_unused_kwargs: (`optional`) bool:\n\n - If False, then this function returns just the final configuration object.\n - If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key\/value pairs whose keys are not configuration attributes: ie the part of kwargs which has not been used to update `config` and is otherwise ignored.\n\n Examples::\n\n config = AutoConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.\n config = AutoConfig.from_pretrained('.\/test\/bert_saved_model\/') # E.g. config (or model) was saved using `save_pretrained('.\/test\/saved_model\/')`\n config = AutoConfig.from_pretrained('.\/test\/bert_saved_model\/my_configuration.json')\n config = AutoConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)\n assert config.output_attention == True\n config, unused_kwargs = AutoConfig.from_pretrained('bert-base-uncased', output_attention=True,\n foo=False, return_unused_kwargs=True)\n assert config.output_attention == True\n assert unused_kwargs == {'foo': False}","docstring_summary":"r\"\"\" Instantiate a one of the configuration classes of the library\n from a pre-trained model configuration.","docstring_tokens":["r","Instantiate","a","one","of","the","configuration","classes","of","the","library","from","a","pre","-","trained","model","configuration","."],"function":"def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):\n r\"\"\" Instantiate a one of the configuration classes of the library\n from a pre-trained model configuration.\n\n The configuration class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertConfig (DistilBERT model)\n - contains `bert`: BertConfig (Bert model)\n - contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model)\n - contains `gpt2`: GPT2Config (OpenAI GPT-2 model)\n - contains `transfo-xl`: TransfoXLConfig (Transformer-XL model)\n - contains `xlnet`: XLNetConfig (XLNet model)\n - contains `xlm`: XLMConfig (XLM model)\n - contains `roberta`: RobertaConfig (RoBERTa model)\n - contains `ctrl` : CTRLConfig (CTRL model)\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``.\n - a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``.\/my_model_directory\/``.\n - a path or url 
to a saved configuration JSON `file`, e.g.: ``.\/my_model_directory\/configuration.json``.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n kwargs: (`optional`) dict: key\/value pairs with which to update the configuration object after loading.\n\n - The values in kwargs of any keys which are configuration attributes will be used to override the loaded values.\n - Behavior concerning key\/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http:\/\/hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n return_unused_kwargs: (`optional`) bool:\n\n - If False, then this function returns just the final configuration object.\n - If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs` is a dictionary consisting of the key\/value pairs whose keys are not configuration attributes: ie the part of kwargs which has not been used to update `config` and is otherwise ignored.\n\n Examples::\n\n config = AutoConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.\n config = AutoConfig.from_pretrained('.\/test\/bert_saved_model\/') # E.g. config (or model) was saved using `save_pretrained('.\/test\/saved_model\/')`\n config = AutoConfig.from_pretrained('.\/test\/bert_saved_model\/my_configuration.json')\n config = AutoConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)\n assert config.output_attention == True\n config, unused_kwargs = AutoConfig.from_pretrained('bert-base-uncased', output_attention=True,\n foo=False, return_unused_kwargs=True)\n assert config.output_attention == True\n assert unused_kwargs == {'foo': False}\n\n \"\"\"\n if 'distilbert' in pretrained_model_name_or_path:\n return DistilBertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n elif 'roberta' in pretrained_model_name_or_path:\n return RobertaConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n elif 'openai-gpt' in pretrained_model_name_or_path:\n return OpenAIGPTConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n elif 'gpt2' in pretrained_model_name_or_path:\n return GPT2Config.from_pretrained(pretrained_model_name_or_path, **kwargs)\n elif 'transfo-xl' in pretrained_model_name_or_path:\n return TransfoXLConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n elif 'ctrl' in pretrained_model_name_or_path:\n return CTRLConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)\n raise ValueError(\"Unrecognized model identifier in {}. 
Should contains one of \"\n \"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"\n \"'xlm', 'roberta', 'ctrl'\".format(pretrained_model_name_or_path))","function_tokens":["def","from_pretrained","(","cls",",","pretrained_model_name_or_path",",","*","*","kwargs",")",":","if","'distilbert'","in","pretrained_model_name_or_path",":","return","DistilBertConfig",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","elif","'roberta'","in","pretrained_model_name_or_path",":","return","RobertaConfig",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","elif","'bert'","in","pretrained_model_name_or_path",":","return","BertConfig",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","elif","'openai-gpt'","in","pretrained_model_name_or_path",":","return","OpenAIGPTConfig",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","elif","'gpt2'","in","pretrained_model_name_or_path",":","return","GPT2Config",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","elif","'transfo-xl'","in","pretrained_model_name_or_path",":","return","TransfoXLConfig",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","elif","'xlnet'","in","pretrained_model_name_or_path",":","return","XLNetConfig",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","elif","'xlm'","in","pretrained_model_name_or_path",":","return","XLMConfig",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","elif","'ctrl'","in","pretrained_model_name_or_path",":","return","CTRLConfig",".","from_pretrained","(","pretrained_model_name_or_path",",","*","*","kwargs",")","raise","ValueError","(","\"Unrecognized model identifier in {}. 
Should contains one of \"","\"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"","\"'xlm', 'roberta', 'ctrl'\"",".","format","(","pretrained_model_name_or_path",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_auto.py#L61-L137"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py","language":"python","identifier":"sample_logits","parameters":"(embedding, bias, labels, inputs, sampler)","argument_list":"","return_statement":"return logits","docstring":"embedding: an nn.Embedding layer\n bias: [n_vocab]\n labels: [b1, b2]\n inputs: [b1, b2, n_emb]\n sampler: you may use a LogUniformSampler\n Return\n logits: [b1, b2, 1 + n_sample]","docstring_summary":"embedding: an nn.Embedding layer\n bias: [n_vocab]\n labels: [b1, b2]\n inputs: [b1, b2, n_emb]\n sampler: you may use a LogUniformSampler\n Return\n logits: [b1, b2, 1 + n_sample]","docstring_tokens":["embedding",":","an","nn",".","Embedding","layer","bias",":","[","n_vocab","]","labels",":","[","b1","b2","]","inputs",":","[","b1","b2","n_emb","]","sampler",":","you","may","use","a","LogUniformSampler","Return","logits",":","[","b1","b2","1","+","n_sample","]"],"function":"def sample_logits(embedding, bias, labels, inputs, sampler):\n \"\"\"\n embedding: an nn.Embedding layer\n bias: [n_vocab]\n labels: [b1, b2]\n inputs: [b1, b2, n_emb]\n sampler: you may use a LogUniformSampler\n Return\n logits: [b1, b2, 1 + n_sample]\n \"\"\"\n true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)\n n_sample = neg_samples.size(0)\n b1, b2 = labels.size(0), labels.size(1)\n all_ids = torch.cat([labels.view(-1), neg_samples])\n all_w = embedding(all_ids)\n true_w = all_w[: -n_sample].view(b1, b2, -1)\n sample_w = all_w[- n_sample:].view(n_sample, -1)\n\n all_b = bias[all_ids]\n true_b = all_b[: -n_sample].view(b1, b2)\n sample_b = all_b[- n_sample:]\n\n hit = (labels[:, :, None] == neg_samples).detach()\n\n true_logits = torch.einsum('ijk,ijk->ij',\n [true_w, inputs]) + true_b - true_log_probs\n sample_logits = torch.einsum('lk,ijk->ijl',\n [sample_w, inputs]) + sample_b - samp_log_probs\n sample_logits.masked_fill_(hit, -1e30)\n logits = torch.cat([true_logits[:, :, None], sample_logits], -1)\n\n return 
logits","function_tokens":["def","sample_logits","(","embedding",",","bias",",","labels",",","inputs",",","sampler",")",":","true_log_probs",",","samp_log_probs",",","neg_samples","=","sampler",".","sample","(","labels",")","n_sample","=","neg_samples",".","size","(","0",")","b1",",","b2","=","labels",".","size","(","0",")",",","labels",".","size","(","1",")","all_ids","=","torch",".","cat","(","[","labels",".","view","(","-","1",")",",","neg_samples","]",")","all_w","=","embedding","(","all_ids",")","true_w","=","all_w","[",":","-","n_sample","]",".","view","(","b1",",","b2",",","-","1",")","sample_w","=","all_w","[","-","n_sample",":","]",".","view","(","n_sample",",","-","1",")","all_b","=","bias","[","all_ids","]","true_b","=","all_b","[",":","-","n_sample","]",".","view","(","b1",",","b2",")","sample_b","=","all_b","[","-","n_sample",":","]","hit","=","(","labels","[",":",",",":",",","None","]","==","neg_samples",")",".","detach","(",")","true_logits","=","torch",".","einsum","(","'ijk,ijk->ij'",",","[","true_w",",","inputs","]",")","+","true_b","-","true_log_probs","sample_logits","=","torch",".","einsum","(","'lk,ijk->ijl'",",","[","sample_w",",","inputs","]",")","+","sample_b","-","samp_log_probs","sample_logits",".","masked_fill_","(","hit",",","-","1e30",")","logits","=","torch",".","cat","(","[","true_logits","[",":",",",":",",","None","]",",","sample_logits","]",",","-","1",")","return","logits"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py#L301-L332"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py","language":"python","identifier":"ProjectedAdaptiveLogSoftmax.forward","parameters":"(self, hidden, labels=None, keep_order=False)","argument_list":"","return_statement":"return out","docstring":"Params:\n hidden :: [len*bsz x d_proj]\n labels :: [len*bsz]\n Return:\n if labels is None:\n out :: [len*bsz] Negative log likelihood\n else:\n out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary\n We could replace this implementation by the native PyTorch one\n if their's had an option to set bias on all clusters in the native one.\n here: https:\/\/github.com\/pytorch\/pytorch\/blob\/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da\/torch\/nn\/modules\/adaptive.py#L138","docstring_summary":"Params:\n hidden :: [len*bsz x d_proj]\n labels :: [len*bsz]\n Return:\n if labels is None:\n out :: [len*bsz] Negative log likelihood\n else:\n out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary\n We could replace this implementation by the native PyTorch one\n if their's had an option to set bias on all clusters in the native one.\n here: 
https:\/\/github.com\/pytorch\/pytorch\/blob\/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da\/torch\/nn\/modules\/adaptive.py#L138","docstring_tokens":["Params",":","hidden","::","[","len","*","bsz","x","d_proj","]","labels","::","[","len","*","bsz","]","Return",":","if","labels","is","None",":","out","::","[","len","*","bsz","]","Negative","log","likelihood","else",":","out","::","[","len","*","bsz","x","n_tokens","]","log","probabilities","of","tokens","over","the","vocabulary","We","could","replace","this","implementation","by","the","native","PyTorch","one","if","their","s","had","an","option","to","set","bias","on","all","clusters","in","the","native","one",".","here",":","https",":","\/\/","github",".","com","\/","pytorch","\/","pytorch","\/","blob","\/","dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da","\/","torch","\/","nn","\/","modules","\/","adaptive",".","py#L138"],"function":"def forward(self, hidden, labels=None, keep_order=False):\n '''\n Params:\n hidden :: [len*bsz x d_proj]\n labels :: [len*bsz]\n Return:\n if labels is None:\n out :: [len*bsz] Negative log likelihood\n else:\n out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary\n We could replace this implementation by the native PyTorch one\n if their's had an option to set bias on all clusters in the native one.\n here: https:\/\/github.com\/pytorch\/pytorch\/blob\/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da\/torch\/nn\/modules\/adaptive.py#L138\n '''\n\n if labels is not None:\n labels = labels.view(-1)\n if hidden.size(0) != labels.size(0):\n raise RuntimeError('Input and labels should have the same size '\n 'in the batch dimension.')\n\n if self.n_clusters == 0:\n logit = self._compute_logit(hidden, self.out_layers[0].weight,\n self.out_layers[0].bias, self.out_projs[0])\n if labels is not None:\n out = -F.log_softmax(logit, dim=-1) \\\n .gather(1, labels.unsqueeze(1)).squeeze(1)\n else:\n out = F.log_softmax(logit, dim=-1)\n else:\n # construct weights and biases\n weights, biases = [], []\n for i in range(len(self.cutoffs)):\n if self.div_val == 1:\n l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]\n weight_i = self.out_layers[0].weight[l_idx:r_idx]\n bias_i = self.out_layers[0].bias[l_idx:r_idx]\n else:\n weight_i = self.out_layers[i].weight\n bias_i = self.out_layers[i].bias\n\n if i == 0:\n weight_i = torch.cat(\n [weight_i, self.cluster_weight], dim=0)\n bias_i = torch.cat(\n [bias_i, self.cluster_bias], dim=0)\n\n weights.append(weight_i)\n biases.append(bias_i)\n\n head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]\n\n head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)\n head_logprob = F.log_softmax(head_logit, dim=1)\n\n if labels is None:\n out = hidden.new_empty((head_logit.size(0), self.n_token))\n else:\n out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)\n\n offset = 0\n cutoff_values = [0] + self.cutoffs\n for i in range(len(cutoff_values) - 1):\n l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]\n\n if labels is not None:\n mask_i = (labels >= l_idx) & (labels < r_idx)\n indices_i = mask_i.nonzero().squeeze()\n\n if indices_i.numel() == 0:\n continue\n\n target_i = labels.index_select(0, indices_i) - l_idx\n head_logprob_i = head_logprob.index_select(0, indices_i)\n hidden_i = hidden.index_select(0, indices_i)\n else:\n hidden_i = hidden\n\n if i == 0:\n if labels is not None:\n logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)\n else:\n out[:, :self.cutoffs[0]] = head_logprob[:, 
:self.cutoffs[0]]\n else:\n weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]\n\n tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)\n tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)\n cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster\n if labels is not None:\n logprob_i = head_logprob_i[:, cluster_prob_idx] \\\n + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)\n else:\n logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i\n out[:, l_idx:r_idx] = logprob_i\n\n if labels is not None:\n if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:\n out.index_copy_(0, indices_i, -logprob_i)\n else:\n out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)\n offset += logprob_i.size(0)\n\n return out","function_tokens":["def","forward","(","self",",","hidden",",","labels","=","None",",","keep_order","=","False",")",":","if","labels","is","not","None",":","labels","=","labels",".","view","(","-","1",")","if","hidden",".","size","(","0",")","!=","labels",".","size","(","0",")",":","raise","RuntimeError","(","'Input and labels should have the same size '","'in the batch dimension.'",")","if","self",".","n_clusters","==","0",":","logit","=","self",".","_compute_logit","(","hidden",",","self",".","out_layers","[","0","]",".","weight",",","self",".","out_layers","[","0","]",".","bias",",","self",".","out_projs","[","0","]",")","if","labels","is","not","None",":","out","=","-","F",".","log_softmax","(","logit",",","dim","=","-","1",")",".","gather","(","1",",","labels",".","unsqueeze","(","1",")",")",".","squeeze","(","1",")","else",":","out","=","F",".","log_softmax","(","logit",",","dim","=","-","1",")","else",":","# construct weights and biases","weights",",","biases","=","[","]",",","[","]","for","i","in","range","(","len","(","self",".","cutoffs",")",")",":","if","self",".","div_val","==","1",":","l_idx",",","r_idx","=","self",".","cutoff_ends","[","i","]",",","self",".","cutoff_ends","[","i","+","1","]","weight_i","=","self",".","out_layers","[","0","]",".","weight","[","l_idx",":","r_idx","]","bias_i","=","self",".","out_layers","[","0","]",".","bias","[","l_idx",":","r_idx","]","else",":","weight_i","=","self",".","out_layers","[","i","]",".","weight","bias_i","=","self",".","out_layers","[","i","]",".","bias","if","i","==","0",":","weight_i","=","torch",".","cat","(","[","weight_i",",","self",".","cluster_weight","]",",","dim","=","0",")","bias_i","=","torch",".","cat","(","[","bias_i",",","self",".","cluster_bias","]",",","dim","=","0",")","weights",".","append","(","weight_i",")","biases",".","append","(","bias_i",")","head_weight",",","head_bias",",","head_proj","=","weights","[","0","]",",","biases","[","0","]",",","self",".","out_projs","[","0","]","head_logit","=","self",".","_compute_logit","(","hidden",",","head_weight",",","head_bias",",","head_proj",")","head_logprob","=","F",".","log_softmax","(","head_logit",",","dim","=","1",")","if","labels","is","None",":","out","=","hidden",".","new_empty","(","(","head_logit",".","size","(","0",")",",","self",".","n_token",")",")","else",":","out","=","torch",".","zeros_like","(","labels",",","dtype","=","hidden",".","dtype",",","device","=","hidden",".","device",")","offset","=","0","cutoff_values","=","[","0","]","+","self",".","cutoffs","for","i","in","range","(","len","(","cutoff_values",")","-","1",")",":","l_idx",",","r_idx","=","cutoff_values","[","i","]",",","cutoff_values","[","i","+","1","]","if","labels","is","not","None",":","mas
k_i","=","(","labels",">=","l_idx",")","&","(","labels","<","r_idx",")","indices_i","=","mask_i",".","nonzero","(",")",".","squeeze","(",")","if","indices_i",".","numel","(",")","==","0",":","continue","target_i","=","labels",".","index_select","(","0",",","indices_i",")","-","l_idx","head_logprob_i","=","head_logprob",".","index_select","(","0",",","indices_i",")","hidden_i","=","hidden",".","index_select","(","0",",","indices_i",")","else",":","hidden_i","=","hidden","if","i","==","0",":","if","labels","is","not","None",":","logprob_i","=","head_logprob_i",".","gather","(","1",",","target_i","[",":",",","None","]",")",".","squeeze","(","1",")","else",":","out","[",":",",",":","self",".","cutoffs","[","0","]","]","=","head_logprob","[",":",",",":","self",".","cutoffs","[","0","]","]","else",":","weight_i",",","bias_i",",","proj_i","=","weights","[","i","]",",","biases","[","i","]",",","self",".","out_projs","[","i","]","tail_logit_i","=","self",".","_compute_logit","(","hidden_i",",","weight_i",",","bias_i",",","proj_i",")","tail_logprob_i","=","F",".","log_softmax","(","tail_logit_i",",","dim","=","1",")","cluster_prob_idx","=","self",".","cutoffs","[","0","]","+","i","-","1","# No probability for the head cluster","if","labels","is","not","None",":","logprob_i","=","head_logprob_i","[",":",",","cluster_prob_idx","]","+","tail_logprob_i",".","gather","(","1",",","target_i","[",":",",","None","]",")",".","squeeze","(","1",")","else",":","logprob_i","=","head_logprob","[",":",",","cluster_prob_idx",",","None","]","+","tail_logprob_i","out","[",":",",","l_idx",":","r_idx","]","=","logprob_i","if","labels","is","not","None",":","if","(","hasattr","(","self",",","'keep_order'",")","and","self",".","keep_order",")","or","keep_order",":","out",".","index_copy_","(","0",",","indices_i",",","-","logprob_i",")","else",":","out","[","offset",":","offset","+","logprob_i",".","size","(","0",")","]",".","copy_","(","-","logprob_i",")","offset","+=","logprob_i",".","size","(","0",")","return","out"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py#L92-L195"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py","language":"python","identifier":"ProjectedAdaptiveLogSoftmax.log_prob","parameters":"(self, hidden)","argument_list":"","return_statement":"","docstring":"r\"\"\" Computes log probabilities for all :math:`n\\_classes`\n From: https:\/\/github.com\/pytorch\/pytorch\/blob\/master\/torch\/nn\/modules\/adaptive.py\n Args:\n hidden (Tensor): a minibatch of examples\n Returns:\n log-probabilities of for each class :math:`c`\n in range :math:`0 <= c <= n\\_classes`, where :math:`n\\_classes` is a\n parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.\n Shape:\n - Input: :math:`(N, in\\_features)`\n - Output: :math:`(N, n\\_classes)`","docstring_summary":"r\"\"\" Computes log probabilities for all :math:`n\\_classes`\n From: https:\/\/github.com\/pytorch\/pytorch\/blob\/master\/torch\/nn\/modules\/adaptive.py\n Args:\n hidden (Tensor): a minibatch of examples\n Returns:\n log-probabilities of for each class :math:`c`\n in range :math:`0 <= c <= n\\_classes`, where :math:`n\\_classes` is a\n parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.\n Shape:\n - Input: :math:`(N, in\\_features)`\n - Output: :math:`(N, 
n\\_classes)`","docstring_tokens":["r","Computes","log","probabilities","for","all",":","math",":","n","\\","_classes","From",":","https",":","\/\/","github",".","com","\/","pytorch","\/","pytorch","\/","blob","\/","master","\/","torch","\/","nn","\/","modules","\/","adaptive",".","py","Args",":","hidden","(","Tensor",")",":","a","minibatch","of","examples","Returns",":","log","-","probabilities","of","for","each","class",":","math",":","c","in","range",":","math",":","0","<","=","c","<","=","n","\\","_classes","where",":","math",":","n","\\","_classes","is","a","parameter","passed","to","AdaptiveLogSoftmaxWithLoss","constructor",".","Shape",":","-","Input",":",":","math",":","(","N","in","\\","_features",")","-","Output",":",":","math",":","(","N","n","\\","_classes",")"],"function":"def log_prob(self, hidden):\n r\"\"\" Computes log probabilities for all :math:`n\\_classes`\n From: https:\/\/github.com\/pytorch\/pytorch\/blob\/master\/torch\/nn\/modules\/adaptive.py\n Args:\n hidden (Tensor): a minibatch of examples\n Returns:\n log-probabilities of for each class :math:`c`\n in range :math:`0 <= c <= n\\_classes`, where :math:`n\\_classes` is a\n parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.\n Shape:\n - Input: :math:`(N, in\\_features)`\n - Output: :math:`(N, n\\_classes)`\n \"\"\"\n if self.n_clusters == 0:\n logit = self._compute_logit(hidden, self.out_layers[0].weight,\n self.out_layers[0].bias, self.out_projs[0])\n return F.log_softmax(logit, dim=-1)\n else:\n # construct weights and biases\n weights, biases = [], []\n for i in range(len(self.cutoffs)):\n if self.div_val == 1:\n l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]\n weight_i = self.out_layers[0].weight[l_idx:r_idx]\n bias_i = self.out_layers[0].bias[l_idx:r_idx]\n else:\n weight_i = self.out_layers[i].weight\n bias_i = self.out_layers[i].bias\n\n if i == 0:\n weight_i = torch.cat(\n [weight_i, self.cluster_weight], dim=0)\n bias_i = torch.cat(\n [bias_i, self.cluster_bias], dim=0)\n\n weights.append(weight_i)\n biases.append(bias_i)\n\n head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]\n head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)\n\n out = hidden.new_empty((head_logit.size(0), self.n_token))\n head_logprob = F.log_softmax(head_logit, dim=1)\n\n cutoff_values = [0] + self.cutoffs\n for i in range(len(cutoff_values) - 1):\n start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]\n\n if i == 0:\n out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]\n else:\n weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]\n\n tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)\n tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)\n\n logprob_i = head_logprob[:, -i] + tail_logprob_i\n out[:, start_idx, stop_idx] = logprob_i\n\n return out","function_tokens":["def","log_prob","(","self",",","hidden",")",":","if","self",".","n_clusters","==","0",":","logit","=","self",".","_compute_logit","(","hidden",",","self",".","out_layers","[","0","]",".","weight",",","self",".","out_layers","[","0","]",".","bias",",","self",".","out_projs","[","0","]",")","return","F",".","log_softmax","(","logit",",","dim","=","-","1",")","else",":","# construct weights and 
biases","weights",",","biases","=","[","]",",","[","]","for","i","in","range","(","len","(","self",".","cutoffs",")",")",":","if","self",".","div_val","==","1",":","l_idx",",","r_idx","=","self",".","cutoff_ends","[","i","]",",","self",".","cutoff_ends","[","i","+","1","]","weight_i","=","self",".","out_layers","[","0","]",".","weight","[","l_idx",":","r_idx","]","bias_i","=","self",".","out_layers","[","0","]",".","bias","[","l_idx",":","r_idx","]","else",":","weight_i","=","self",".","out_layers","[","i","]",".","weight","bias_i","=","self",".","out_layers","[","i","]",".","bias","if","i","==","0",":","weight_i","=","torch",".","cat","(","[","weight_i",",","self",".","cluster_weight","]",",","dim","=","0",")","bias_i","=","torch",".","cat","(","[","bias_i",",","self",".","cluster_bias","]",",","dim","=","0",")","weights",".","append","(","weight_i",")","biases",".","append","(","bias_i",")","head_weight",",","head_bias",",","head_proj","=","weights","[","0","]",",","biases","[","0","]",",","self",".","out_projs","[","0","]","head_logit","=","self",".","_compute_logit","(","hidden",",","head_weight",",","head_bias",",","head_proj",")","out","=","hidden",".","new_empty","(","(","head_logit",".","size","(","0",")",",","self",".","n_token",")",")","head_logprob","=","F",".","log_softmax","(","head_logit",",","dim","=","1",")","cutoff_values","=","[","0","]","+","self",".","cutoffs","for","i","in","range","(","len","(","cutoff_values",")","-","1",")",":","start_idx",",","stop_idx","=","cutoff_values","[","i","]",",","cutoff_values","[","i","+","1","]","if","i","==","0",":","out","[",":",",",":","self",".","cutoffs","[","0","]","]","=","head_logprob","[",":",",",":","self",".","cutoffs","[","0","]","]","else",":","weight_i",",","bias_i",",","proj_i","=","weights","[","i","]",",","biases","[","i","]",",","self",".","out_projs","[","i","]","tail_logit_i","=","self",".","_compute_logit","(","hidden",",","weight_i",",","bias_i",",","proj_i",")","tail_logprob_i","=","F",".","log_softmax","(","tail_logit_i",",","dim","=","1",")","logprob_i","=","head_logprob","[",":",",","-","i","]","+","tail_logprob_i","out","[",":",",","start_idx",",","stop_idx","]","=","logprob_i","return","out"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py#L198-L257"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py","language":"python","identifier":"LogUniformSampler.__init__","parameters":"(self, range_max, n_sample)","argument_list":"","return_statement":"","docstring":"Reference : https:\/\/github.com\/tensorflow\/tensorflow\/blob\/r1.10\/tensorflow\/python\/ops\/candidate_sampling_ops.py\n `P(class) = (log(class + 2) - log(class + 1)) \/ log(range_max + 1)`\n\n expected count can be approximated by 1 - (1 - p)^n\n and we use a numerically stable version -expm1(num_tries * log1p(-p))\n\n Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run","docstring_summary":"Reference : https:\/\/github.com\/tensorflow\/tensorflow\/blob\/r1.10\/tensorflow\/python\/ops\/candidate_sampling_ops.py\n `P(class) = (log(class + 2) - log(class + 1)) \/ log(range_max + 
1)`","docstring_tokens":["Reference",":","https",":","\/\/","github",".","com","\/","tensorflow","\/","tensorflow","\/","blob","\/","r1",".","10","\/","tensorflow","\/","python","\/","ops","\/","candidate_sampling_ops",".","py","P","(","class",")","=","(","log","(","class","+","2",")","-","log","(","class","+","1","))","\/","log","(","range_max","+","1",")"],"function":"def __init__(self, range_max, n_sample):\n \"\"\"\n Reference : https:\/\/github.com\/tensorflow\/tensorflow\/blob\/r1.10\/tensorflow\/python\/ops\/candidate_sampling_ops.py\n `P(class) = (log(class + 2) - log(class + 1)) \/ log(range_max + 1)`\n\n expected count can be approximated by 1 - (1 - p)^n\n and we use a numerically stable version -expm1(num_tries * log1p(-p))\n\n Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run\n \"\"\"\n with torch.no_grad():\n self.range_max = range_max\n log_indices = torch.arange(1., range_max+2., 1.).log_()\n self.dist = (log_indices[1:] - log_indices[:-1]) \/ log_indices[-1]\n\n self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()\n\n self.n_sample = n_sample","function_tokens":["def","__init__","(","self",",","range_max",",","n_sample",")",":","with","torch",".","no_grad","(",")",":","self",".","range_max","=","range_max","log_indices","=","torch",".","arange","(","1.",",","range_max","+","2.",",","1.",")",".","log_","(",")","self",".","dist","=","(","log_indices","[","1",":","]","-","log_indices","[",":","-","1","]",")","\/","log_indices","[","-","1","]","self",".","log_q","=","(","-","(","-","self",".","dist",".","double","(",")",".","log1p_","(",")","*","2","*","n_sample",")",".","expm1_","(",")",")",".","log_","(",")",".","float","(",")","self",".","n_sample","=","n_sample"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py#L261-L278"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py","language":"python","identifier":"LogUniformSampler.sample","parameters":"(self, labels)","argument_list":"","return_statement":"","docstring":"labels: [b1, b2]\n Return\n true_log_probs: [b1, b2]\n samp_log_probs: [n_sample]\n neg_samples: [n_sample]","docstring_summary":"labels: [b1, b2]\n Return\n true_log_probs: [b1, b2]\n samp_log_probs: [n_sample]\n neg_samples: [n_sample]","docstring_tokens":["labels",":","[","b1","b2","]","Return","true_log_probs",":","[","b1","b2","]","samp_log_probs",":","[","n_sample","]","neg_samples",":","[","n_sample","]"],"function":"def sample(self, labels):\n \"\"\"\n labels: [b1, b2]\n Return\n true_log_probs: [b1, b2]\n samp_log_probs: [n_sample]\n neg_samples: [n_sample]\n \"\"\"\n\n # neg_samples = torch.empty(0).long()\n n_sample = self.n_sample\n n_tries = 2 * n_sample\n\n with torch.no_grad():\n neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()\n device = labels.device\n neg_samples = neg_samples.to(device)\n true_log_probs = self.log_q[labels].to(device)\n samp_log_probs = self.log_q[neg_samples].to(device)\n return true_log_probs, samp_log_probs, neg_samples","function_tokens":["def","sample","(","self",",","labels",")",":","# neg_samples = 
torch.empty(0).long()","n_sample","=","self",".","n_sample","n_tries","=","2","*","n_sample","with","torch",".","no_grad","(",")",":","neg_samples","=","torch",".","multinomial","(","self",".","dist",",","n_tries",",","replacement","=","True",")",".","unique","(",")","device","=","labels",".","device","neg_samples","=","neg_samples",".","to","(","device",")","true_log_probs","=","self",".","log_q","[","labels","]",".","to","(","device",")","samp_log_probs","=","self",".","log_q","[","neg_samples","]",".","to","(","device",")","return","true_log_probs",",","samp_log_probs",",","neg_samples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl_utilities.py#L280-L299"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_xlm.py","language":"python","identifier":"XLMConfig.__init__","parameters":"(self,\n vocab_size_or_config_json_file=30145,\n emb_dim=2048,\n n_layers=12,\n n_heads=16,\n dropout=0.1,\n attention_dropout=0.1,\n gelu_activation=True,\n sinusoidal_embeddings=False,\n causal=False,\n asm=False,\n n_langs=1,\n use_lang_emb=True,\n max_position_embeddings=512,\n embed_init_std=2048 ** -0.5,\n layer_norm_eps=1e-12,\n init_std=0.02,\n bos_index=0,\n eos_index=1,\n pad_index=2,\n unk_index=3,\n mask_index=5,\n is_encoder=True,\n\n finetuning_task=None,\n num_labels=2,\n summary_type='first',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n start_n_top=5,\n end_n_top=5,\n **kwargs)","argument_list":"","return_statement":"","docstring":"Constructs XLMConfig.","docstring_summary":"Constructs XLMConfig.","docstring_tokens":["Constructs","XLMConfig","."],"function":"def __init__(self,\n vocab_size_or_config_json_file=30145,\n emb_dim=2048,\n n_layers=12,\n n_heads=16,\n dropout=0.1,\n attention_dropout=0.1,\n gelu_activation=True,\n sinusoidal_embeddings=False,\n causal=False,\n asm=False,\n n_langs=1,\n use_lang_emb=True,\n max_position_embeddings=512,\n embed_init_std=2048 ** -0.5,\n layer_norm_eps=1e-12,\n init_std=0.02,\n bos_index=0,\n eos_index=1,\n pad_index=2,\n unk_index=3,\n mask_index=5,\n is_encoder=True,\n\n finetuning_task=None,\n num_labels=2,\n summary_type='first',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n start_n_top=5,\n end_n_top=5,\n **kwargs):\n \"\"\"Constructs XLMConfig.\n \"\"\"\n super(XLMConfig, self).__init__(**kwargs)\n\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.n_words = vocab_size_or_config_json_file\n self.emb_dim = emb_dim\n self.n_layers = n_layers\n self.n_heads = n_heads\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.gelu_activation = gelu_activation\n self.sinusoidal_embeddings = sinusoidal_embeddings\n self.causal = causal\n self.asm = asm\n self.n_langs = n_langs\n self.use_lang_emb = use_lang_emb\n self.layer_norm_eps = layer_norm_eps\n self.bos_index = bos_index\n self.eos_index = eos_index\n self.pad_index 
= pad_index\n self.unk_index = unk_index\n self.mask_index = mask_index\n self.is_encoder = is_encoder\n self.max_position_embeddings = max_position_embeddings\n self.embed_init_std = embed_init_std\n self.init_std = init_std\n self.finetuning_task = finetuning_task\n self.num_labels = num_labels\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_proj_to_labels = summary_proj_to_labels\n self.summary_first_dropout = summary_first_dropout\n self.start_n_top = start_n_top\n self.end_n_top = end_n_top\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \" or the path to a pretrained model config file (str)\")","function_tokens":["def","__init__","(","self",",","vocab_size_or_config_json_file","=","30145",",","emb_dim","=","2048",",","n_layers","=","12",",","n_heads","=","16",",","dropout","=","0.1",",","attention_dropout","=","0.1",",","gelu_activation","=","True",",","sinusoidal_embeddings","=","False",",","causal","=","False",",","asm","=","False",",","n_langs","=","1",",","use_lang_emb","=","True",",","max_position_embeddings","=","512",",","embed_init_std","=","2048","**","-","0.5",",","layer_norm_eps","=","1e-12",",","init_std","=","0.02",",","bos_index","=","0",",","eos_index","=","1",",","pad_index","=","2",",","unk_index","=","3",",","mask_index","=","5",",","is_encoder","=","True",",","finetuning_task","=","None",",","num_labels","=","2",",","summary_type","=","'first'",",","summary_use_proj","=","True",",","summary_activation","=","None",",","summary_proj_to_labels","=","True",",","summary_first_dropout","=","0.1",",","start_n_top","=","5",",","end_n_top","=","5",",","*","*","kwargs",")",":","super","(","XLMConfig",",","self",")",".","__init__","(","*","*","kwargs",")","if","isinstance","(","vocab_size_or_config_json_file",",","str",")","or","(","sys",".","version_info","[","0","]","==","2","and","isinstance","(","vocab_size_or_config_json_file",",","unicode",")",")",":","with","open","(","vocab_size_or_config_json_file",",","\"r\"",",","encoding","=","'utf-8'",")","as","reader",":","json_config","=","json",".","loads","(","reader",".","read","(",")",")","for","key",",","value","in","json_config",".","items","(",")",":","self",".","__dict__","[","key","]","=","value","elif","isinstance","(","vocab_size_or_config_json_file",",","int",")",":","self",".","n_words","=","vocab_size_or_config_json_file","self",".","emb_dim","=","emb_dim","self",".","n_layers","=","n_layers","self",".","n_heads","=","n_heads","self",".","dropout","=","dropout","self",".","attention_dropout","=","attention_dropout","self",".","gelu_activation","=","gelu_activation","self",".","sinusoidal_embeddings","=","sinusoidal_embeddings","self",".","causal","=","causal","self",".","asm","=","asm","self",".","n_langs","=","n_langs","self",".","use_lang_emb","=","use_lang_emb","self",".","layer_norm_eps","=","layer_norm_eps","self",".","bos_index","=","bos_index","self",".","eos_index","=","eos_index","self",".","pad_index","=","pad_index","self",".","unk_index","=","unk_index","self",".","mask_index","=","mask_index","self",".","is_encoder","=","is_encoder","self",".","max_position_embeddings","=","max_position_embeddings","self",".","embed_init_std","=","embed_init_std","self",".","init_std","=","init_std","self",".","finetuning_task","=","finetuning_task","self",".","num_labels","=","num_labels","self",".","summary_type","=","summary_type","self",".","summary_use_proj","=","summary_use_proj","sel
f",".","summary_activation","=","summary_activation","self",".","summary_proj_to_labels","=","summary_proj_to_labels","self",".","summary_first_dropout","=","summary_first_dropout","self",".","start_n_top","=","start_n_top","self",".","end_n_top","=","end_n_top","else",":","raise","ValueError","(","\"First argument must be either a vocabulary size (int)\"","\" or the path to a pretrained model config file (str)\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/configuration_xlm.py#L83-L161"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_albert.py","language":"python","identifier":"load_tf_weights_in_albert","parameters":"(model, config, tf_checkpoint_path)","argument_list":"","return_statement":"return model","docstring":"Load tf checkpoints in a pytorch model.","docstring_summary":"Load tf checkpoints in a pytorch model.","docstring_tokens":["Load","tf","checkpoints","in","a","pytorch","model","."],"function":"def load_tf_weights_in_albert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('\/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n logger.info(\"Skipping {}\".format(\"\/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'squad':\n pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, l[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"\/\".join(name)))\n continue\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name[-13:] == '_embeddings_2':\n pointer = getattr(pointer, 'weight')\n array = np.transpose(array)\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return 
model","function_tokens":["def","load_tf_weights_in_albert","(","model",",","config",",","tf_checkpoint_path",")",":","try",":","import","re","import","numpy","as","np","import","tensorflow","as","tf","except","ImportError",":","logger",".","error","(","\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see \"","\"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\"",")","raise","tf_path","=","os",".","path",".","abspath","(","tf_checkpoint_path",")","logger",".","info","(","\"Converting TensorFlow checkpoint from {}\"",".","format","(","tf_path",")",")","# Load weights from TF model","init_vars","=","tf",".","train",".","list_variables","(","tf_path",")","names","=","[","]","arrays","=","[","]","for","name",",","shape","in","init_vars",":","logger",".","info","(","\"Loading TF weight {} with shape {}\"",".","format","(","name",",","shape",")",")","array","=","tf",".","train",".","load_variable","(","tf_path",",","name",")","names",".","append","(","name",")","arrays",".","append","(","array",")","for","name",",","array","in","zip","(","names",",","arrays",")",":","name","=","name",".","split","(","'\/'",")","# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v","# which are not required for using pretrained model","if","any","(","n","in","[","\"adam_v\"",",","\"adam_m\"",",","\"global_step\"","]","for","n","in","name",")",":","logger",".","info","(","\"Skipping {}\"",".","format","(","\"\/\"",".","join","(","name",")",")",")","continue","pointer","=","model","for","m_name","in","name",":","if","re",".","fullmatch","(","r'[A-Za-z]+_\\d+'",",","m_name",")",":","l","=","re",".","split","(","r'_(\\d+)'",",","m_name",")","else",":","l","=","[","m_name","]","if","l","[","0","]","==","'kernel'","or","l","[","0","]","==","'gamma'",":","pointer","=","getattr","(","pointer",",","'weight'",")","elif","l","[","0","]","==","'output_bias'","or","l","[","0","]","==","'beta'",":","pointer","=","getattr","(","pointer",",","'bias'",")","elif","l","[","0","]","==","'output_weights'",":","pointer","=","getattr","(","pointer",",","'weight'",")","elif","l","[","0","]","==","'squad'",":","pointer","=","getattr","(","pointer",",","'classifier'",")","else",":","try",":","pointer","=","getattr","(","pointer",",","l","[","0","]",")","except","AttributeError",":","logger",".","info","(","\"Skipping {}\"",".","format","(","\"\/\"",".","join","(","name",")",")",")","continue","if","len","(","l",")",">=","2",":","num","=","int","(","l","[","1","]",")","pointer","=","pointer","[","num","]","if","m_name","[","-","11",":","]","==","'_embeddings'",":","pointer","=","getattr","(","pointer",",","'weight'",")","elif","m_name","[","-","13",":","]","==","'_embeddings_2'",":","pointer","=","getattr","(","pointer",",","'weight'",")","array","=","np",".","transpose","(","array",")","elif","m_name","==","'kernel'",":","array","=","np",".","transpose","(","array",")","try",":","assert","pointer",".","shape","==","array",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","pointer",".","shape",",","array",".","shape",")","raise","logger",".","info","(","\"Initialize PyTorch weight {}\"",".","format","(","name",")",")","pointer",".","data","=","torch",".","from_numpy","(","array",")","return","model"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_albert.py#L47-L114"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_albert.py","language":"python","identifier":"AlbertPreTrainedModel._init_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights","docstring_summary":"Initialize the weights","docstring_tokens":["Initialize","the","weights"],"function":"def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()","function_tokens":["def","_init_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","elif","isinstance","(","module",",","BertLayerNorm",")",":","module",".","bias",".","data",".","zero_","(",")","module",".","weight",".","data",".","fill_","(","1.0",")","if","isinstance","(","module",",","nn",".","Linear",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_albert.py#L360-L370"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_openai.py","language":"python","identifier":"load_tf_weights_in_openai_gpt","parameters":"(model, config, openai_checkpoint_folder_path)","argument_list":"","return_statement":"return model","docstring":"Load tf pre-trained weights in a pytorch model (from NumPy arrays here)","docstring_summary":"Load tf pre-trained weights in a pytorch model (from NumPy arrays here)","docstring_tokens":["Load","tf","pre","-","trained","weights","in","a","pytorch","model","(","from","NumPy","arrays","here",")"],"function":"def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):\n \"\"\" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)\n \"\"\"\n import re\n import numpy as np\n\n if '.ckpt' in openai_checkpoint_folder_path:\n openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)\n\n logger.info(\"Loading weights from {}\".format(openai_checkpoint_folder_path))\n\n names = json.load(open(openai_checkpoint_folder_path + '\/parameters_names.json', \"r\", encoding='utf-8'))\n shapes = json.load(open(openai_checkpoint_folder_path + '\/params_shapes.json', \"r\", encoding='utf-8'))\n offsets = np.cumsum([np.prod(shape) for shape in shapes])\n init_params = [np.load(openai_checkpoint_folder_path + '\/params_{}.npy'.format(n)) for n in range(10)]\n init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]\n init_params = [param.reshape(shape) for param, shape in 
zip(init_params, shapes)]\n\n # This was used when we had a single embedding matrix for positions and tokens\n # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)\n # del init_params[1]\n init_params = [arr.squeeze() for arr in init_params]\n\n try:\n assert model.tokens_embed.weight.shape == init_params[1].shape\n assert model.positions_embed.weight.shape == init_params[0].shape\n except AssertionError as e:\n e.args += (model.tokens_embed.weight.shape, init_params[1].shape)\n e.args += (model.positions_embed.weight.shape, init_params[0].shape)\n raise\n\n model.tokens_embed.weight.data = torch.from_numpy(init_params[1])\n model.positions_embed.weight.data = torch.from_numpy(init_params[0])\n names.pop(0)\n # Pop position and token embedding arrays\n init_params.pop(0)\n init_params.pop(0)\n\n for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):\n name = name[6:] # skip \"model\/\"\n assert name[-2:] == \":0\"\n name = name[:-2]\n name = name.split('\/')\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+\\d+', m_name):\n l = re.split(r'(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'g':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'b':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'w':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model","function_tokens":["def","load_tf_weights_in_openai_gpt","(","model",",","config",",","openai_checkpoint_folder_path",")",":","import","re","import","numpy","as","np","if","'.ckpt'","in","openai_checkpoint_folder_path",":","openai_checkpoint_folder_path","=","os",".","path",".","dirname","(","openai_checkpoint_folder_path",")","logger",".","info","(","\"Loading weights from {}\"",".","format","(","openai_checkpoint_folder_path",")",")","names","=","json",".","load","(","open","(","openai_checkpoint_folder_path","+","'\/parameters_names.json'",",","\"r\"",",","encoding","=","'utf-8'",")",")","shapes","=","json",".","load","(","open","(","openai_checkpoint_folder_path","+","'\/params_shapes.json'",",","\"r\"",",","encoding","=","'utf-8'",")",")","offsets","=","np",".","cumsum","(","[","np",".","prod","(","shape",")","for","shape","in","shapes","]",")","init_params","=","[","np",".","load","(","openai_checkpoint_folder_path","+","'\/params_{}.npy'",".","format","(","n",")",")","for","n","in","range","(","10",")","]","init_params","=","np",".","split","(","np",".","concatenate","(","init_params",",","0",")",",","offsets",")","[",":","-","1","]","init_params","=","[","param",".","reshape","(","shape",")","for","param",",","shape","in","zip","(","init_params",",","shapes",")","]","# This was used when we had a single embedding matrix for positions and tokens","# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)","# del 
init_params[1]","init_params","=","[","arr",".","squeeze","(",")","for","arr","in","init_params","]","try",":","assert","model",".","tokens_embed",".","weight",".","shape","==","init_params","[","1","]",".","shape","assert","model",".","positions_embed",".","weight",".","shape","==","init_params","[","0","]",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","model",".","tokens_embed",".","weight",".","shape",",","init_params","[","1","]",".","shape",")","e",".","args","+=","(","model",".","positions_embed",".","weight",".","shape",",","init_params","[","0","]",".","shape",")","raise","model",".","tokens_embed",".","weight",".","data","=","torch",".","from_numpy","(","init_params","[","1","]",")","model",".","positions_embed",".","weight",".","data","=","torch",".","from_numpy","(","init_params","[","0","]",")","names",".","pop","(","0",")","# Pop position and token embedding arrays","init_params",".","pop","(","0",")","init_params",".","pop","(","0",")","for","name",",","array","in","zip","(","names",",","init_params",")",":","# names[1:n_transfer], init_params[1:n_transfer]):","name","=","name","[","6",":","]","# skip \"model\/\"","assert","name","[","-","2",":","]","==","\":0\"","name","=","name","[",":","-","2","]","name","=","name",".","split","(","'\/'",")","pointer","=","model","for","m_name","in","name",":","if","re",".","fullmatch","(","r'[A-Za-z]+\\d+'",",","m_name",")",":","l","=","re",".","split","(","r'(\\d+)'",",","m_name",")","else",":","l","=","[","m_name","]","if","l","[","0","]","==","'g'",":","pointer","=","getattr","(","pointer",",","'weight'",")","elif","l","[","0","]","==","'b'",":","pointer","=","getattr","(","pointer",",","'bias'",")","elif","l","[","0","]","==","'w'",":","pointer","=","getattr","(","pointer",",","'weight'",")","else",":","pointer","=","getattr","(","pointer",",","l","[","0","]",")","if","len","(","l",")",">=","2",":","num","=","int","(","l","[","1","]",")","pointer","=","pointer","[","num","]","try",":","assert","pointer",".","shape","==","array",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","pointer",".","shape",",","array",".","shape",")","raise","try",":","assert","pointer",".","shape","==","array",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","pointer",".","shape",",","array",".","shape",")","raise","logger",".","info","(","\"Initialize PyTorch weight {}\"",".","format","(","name",")",")","pointer",".","data","=","torch",".","from_numpy","(","array",")","return","model"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_openai.py#L42-L114"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_openai.py","language":"python","identifier":"OpenAIGPTPreTrainedModel._init_weights","parameters":"(self, module)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if 
isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)","function_tokens":["def","_init_weights","(","self",",","module",")",":","if","isinstance","(","module",",","(","nn",".","Linear",",","nn",".","Embedding",",","Conv1D",")",")",":","# Slightly different from the TF version which uses truncated_normal for initialization","# cf https:\/\/github.com\/pytorch\/pytorch\/pull\/5617","module",".","weight",".","data",".","normal_","(","mean","=","0.0",",","std","=","self",".","config",".","initializer_range",")","if","isinstance","(","module",",","(","nn",".","Linear",",","Conv1D",")",")","and","module",".","bias","is","not","None",":","module",".","bias",".","data",".","zero_","(",")","elif","isinstance","(","module",",","nn",".","LayerNorm",")",":","module",".","bias",".","data",".","zero_","(",")","module",".","weight",".","data",".","fill_","(","1.0",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_openai.py#L267-L278"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl.py","language":"python","identifier":"build_tf_to_pytorch_map","parameters":"(model, config)","argument_list":"","return_statement":"return tf_to_pt_map","docstring":"A map of modules from TF to PyTorch.\n This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.","docstring_summary":"A map of modules from TF to PyTorch.\n This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.","docstring_tokens":["A","map","of","modules","from","TF","to","PyTorch",".","This","time","I","use","a","map","to","keep","the","PyTorch","model","as","identical","to","the","original","PyTorch","model","as","possible","."],"function":"def build_tf_to_pytorch_map(model, config):\n \"\"\" A map of modules from TF to PyTorch.\n This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.\n \"\"\"\n tf_to_pt_map = {}\n\n if hasattr(model, 'transformer'):\n # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax\n tf_to_pt_map.update({\n \"transformer\/adaptive_softmax\/cutoff_0\/cluster_W\": model.crit.cluster_weight,\n \"transformer\/adaptive_softmax\/cutoff_0\/cluster_b\": model.crit.cluster_bias})\n for i, (out_l, proj_l, tie_proj) in enumerate(zip(\n model.crit.out_layers,\n model.crit.out_projs,\n config.tie_projs)):\n layer_str = \"transformer\/adaptive_softmax\/cutoff_%d\/\" % i\n if config.tie_weight:\n tf_to_pt_map.update({\n layer_str + 'b': out_l.bias})\n else:\n raise NotImplementedError\n # I don't think this is implemented in the TF code\n tf_to_pt_map.update({\n layer_str + 'lookup_table': out_l.weight,\n layer_str + 'b': out_l.bias})\n if not tie_proj:\n tf_to_pt_map.update({\n layer_str + 'proj': proj_l\n })\n # Now load the rest of the transformer\n model = model.transformer\n\n # Embeddings\n for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):\n layer_str = \"transformer\/adaptive_embed\/cutoff_%d\/\" % i\n tf_to_pt_map.update({\n layer_str + 'lookup_table': embed_l.weight,\n layer_str + 'proj_W': proj_l\n })\n\n # Transformer blocks\n for i, b in 
enumerate(model.layers):\n layer_str = \"transformer\/layer_%d\/\" % i\n tf_to_pt_map.update({\n layer_str + \"rel_attn\/LayerNorm\/gamma\": b.dec_attn.layer_norm.weight,\n layer_str + \"rel_attn\/LayerNorm\/beta\": b.dec_attn.layer_norm.bias,\n layer_str + \"rel_attn\/o\/kernel\": b.dec_attn.o_net.weight,\n layer_str + \"rel_attn\/qkv\/kernel\": b.dec_attn.qkv_net.weight,\n layer_str + \"rel_attn\/r\/kernel\": b.dec_attn.r_net.weight,\n layer_str + \"ff\/LayerNorm\/gamma\": b.pos_ff.layer_norm.weight,\n layer_str + \"ff\/LayerNorm\/beta\": b.pos_ff.layer_norm.bias,\n layer_str + \"ff\/layer_1\/kernel\": b.pos_ff.CoreNet[0].weight,\n layer_str + \"ff\/layer_1\/bias\": b.pos_ff.CoreNet[0].bias,\n layer_str + \"ff\/layer_2\/kernel\": b.pos_ff.CoreNet[3].weight,\n layer_str + \"ff\/layer_2\/bias\": b.pos_ff.CoreNet[3].bias,\n })\n\n # Relative positioning biases\n if config.untie_r:\n r_r_list = []\n r_w_list = []\n for b in model.layers:\n r_r_list.append(b.dec_attn.r_r_bias)\n r_w_list.append(b.dec_attn.r_w_bias)\n else:\n r_r_list = [model.r_r_bias]\n r_w_list = [model.r_w_bias]\n tf_to_pt_map.update({\n 'transformer\/r_r_bias': r_r_list,\n 'transformer\/r_w_bias': r_w_list})\n return tf_to_pt_map","function_tokens":["def","build_tf_to_pytorch_map","(","model",",","config",")",":","tf_to_pt_map","=","{","}","if","hasattr","(","model",",","'transformer'",")",":","# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax","tf_to_pt_map",".","update","(","{","\"transformer\/adaptive_softmax\/cutoff_0\/cluster_W\"",":","model",".","crit",".","cluster_weight",",","\"transformer\/adaptive_softmax\/cutoff_0\/cluster_b\"",":","model",".","crit",".","cluster_bias","}",")","for","i",",","(","out_l",",","proj_l",",","tie_proj",")","in","enumerate","(","zip","(","model",".","crit",".","out_layers",",","model",".","crit",".","out_projs",",","config",".","tie_projs",")",")",":","layer_str","=","\"transformer\/adaptive_softmax\/cutoff_%d\/\"","%","i","if","config",".","tie_weight",":","tf_to_pt_map",".","update","(","{","layer_str","+","'b'",":","out_l",".","bias","}",")","else",":","raise","NotImplementedError","# I don't think this is implemented in the TF code","tf_to_pt_map",".","update","(","{","layer_str","+","'lookup_table'",":","out_l",".","weight",",","layer_str","+","'b'",":","out_l",".","bias","}",")","if","not","tie_proj",":","tf_to_pt_map",".","update","(","{","layer_str","+","'proj'",":","proj_l","}",")","# Now load the rest of the transformer","model","=","model",".","transformer","# Embeddings","for","i",",","(","embed_l",",","proj_l",")","in","enumerate","(","zip","(","model",".","word_emb",".","emb_layers",",","model",".","word_emb",".","emb_projs",")",")",":","layer_str","=","\"transformer\/adaptive_embed\/cutoff_%d\/\"","%","i","tf_to_pt_map",".","update","(","{","layer_str","+","'lookup_table'",":","embed_l",".","weight",",","layer_str","+","'proj_W'",":","proj_l","}",")","# Transformer 
blocks","for","i",",","b","in","enumerate","(","model",".","layers",")",":","layer_str","=","\"transformer\/layer_%d\/\"","%","i","tf_to_pt_map",".","update","(","{","layer_str","+","\"rel_attn\/LayerNorm\/gamma\"",":","b",".","dec_attn",".","layer_norm",".","weight",",","layer_str","+","\"rel_attn\/LayerNorm\/beta\"",":","b",".","dec_attn",".","layer_norm",".","bias",",","layer_str","+","\"rel_attn\/o\/kernel\"",":","b",".","dec_attn",".","o_net",".","weight",",","layer_str","+","\"rel_attn\/qkv\/kernel\"",":","b",".","dec_attn",".","qkv_net",".","weight",",","layer_str","+","\"rel_attn\/r\/kernel\"",":","b",".","dec_attn",".","r_net",".","weight",",","layer_str","+","\"ff\/LayerNorm\/gamma\"",":","b",".","pos_ff",".","layer_norm",".","weight",",","layer_str","+","\"ff\/LayerNorm\/beta\"",":","b",".","pos_ff",".","layer_norm",".","bias",",","layer_str","+","\"ff\/layer_1\/kernel\"",":","b",".","pos_ff",".","CoreNet","[","0","]",".","weight",",","layer_str","+","\"ff\/layer_1\/bias\"",":","b",".","pos_ff",".","CoreNet","[","0","]",".","bias",",","layer_str","+","\"ff\/layer_2\/kernel\"",":","b",".","pos_ff",".","CoreNet","[","3","]",".","weight",",","layer_str","+","\"ff\/layer_2\/bias\"",":","b",".","pos_ff",".","CoreNet","[","3","]",".","bias",",","}",")","# Relative positioning biases","if","config",".","untie_r",":","r_r_list","=","[","]","r_w_list","=","[","]","for","b","in","model",".","layers",":","r_r_list",".","append","(","b",".","dec_attn",".","r_r_bias",")","r_w_list",".","append","(","b",".","dec_attn",".","r_w_bias",")","else",":","r_r_list","=","[","model",".","r_r_bias","]","r_w_list","=","[","model",".","r_w_bias","]","tf_to_pt_map",".","update","(","{","'transformer\/r_r_bias'",":","r_r_list",",","'transformer\/r_w_bias'",":","r_w_list","}",")","return","tf_to_pt_map"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl.py#L48-L118"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl.py","language":"python","identifier":"load_tf_weights_in_transfo_xl","parameters":"(model, config, tf_path)","argument_list":"","return_statement":"return model","docstring":"Load tf checkpoints in a pytorch model","docstring_summary":"Load tf checkpoints in a pytorch model","docstring_tokens":["Load","tf","checkpoints","in","a","pytorch","model"],"function":"def load_tf_weights_in_transfo_xl(model, config, tf_path):\n \"\"\" Load tf checkpoints in a pytorch model\n \"\"\"\n try:\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\")\n raise\n # Build TF to PyTorch weights loading map\n tf_to_pt_map = build_tf_to_pytorch_map(model, config)\n\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n tf_weights[name] = array\n\n for name, pointer in tf_to_pt_map.items():\n assert name in tf_weights\n array = tf_weights[name]\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if 'kernel' in name or 'proj' in name:\n array = np.transpose(array)\n if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1:\n # Here we will split the TF weigths\n assert len(pointer) == array.shape[0]\n for i, p_i in enumerate(pointer):\n arr_i = array[i, ...]\n try:\n assert p_i.shape == arr_i.shape\n except AssertionError as e:\n e.args += (p_i.shape, arr_i.shape)\n raise\n logger.info(\"Initialize PyTorch weight {} for layer {}\".format(name, i))\n p_i.data = torch.from_numpy(arr_i)\n else:\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n tf_weights.pop(name, None)\n tf_weights.pop(name + '\/Adam', None)\n tf_weights.pop(name + '\/Adam_1', None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model","function_tokens":["def","load_tf_weights_in_transfo_xl","(","model",",","config",",","tf_path",")",":","try",":","import","numpy","as","np","import","tensorflow","as","tf","except","ImportError",":","logger",".","error","(","\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see \"","\"https:\/\/www.tensorflow.org\/install\/ for installation instructions.\"",")","raise","# Build TF to PyTorch weights loading map","tf_to_pt_map","=","build_tf_to_pytorch_map","(","model",",","config",")","# Load weights from TF model","init_vars","=","tf",".","train",".","list_variables","(","tf_path",")","tf_weights","=","{","}","for","name",",","shape","in","init_vars",":","logger",".","info","(","\"Loading TF weight {} with shape {}\"",".","format","(","name",",","shape",")",")","array","=","tf",".","train",".","load_variable","(","tf_path",",","name",")","tf_weights","[","name","]","=","array","for","name",",","pointer","in","tf_to_pt_map",".","items","(",")",":","assert","name","in","tf_weights","array","=","tf_weights","[","name","]","# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v","# which are not required for using pretrained model","if","'kernel'","in","name","or","'proj'","in","name",":","array","=","np",".","transpose","(","array",")","if","(","'r_r_bias'","in","name","or","'r_w_bias'","in","name",")","and","len","(","pointer",")",">","1",":","# Here we will split the TF weigths","assert","len","(","pointer",")","==","array",".","shape","[","0","]","for","i",",","p_i","in","enumerate","(","pointer",")",":","arr_i","=","array","[","i",",","...","]","try",":","assert","p_i",".","shape","==","arr_i",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","p_i",".","shape",",","arr_i",".","shape",")","raise","logger",".","info","(","\"Initialize PyTorch weight {} for layer {}\"",".","format","(","name",",","i",")",")","p_i",".","data","=","torch",".","from_numpy","(","arr_i",")","else",":","try",":","assert","pointer",".","shape","==","array",".","shape","except","AssertionError","as","e",":","e",".","args","+=","(","pointer",".","shape",",","array",".","shape",")","raise","logger",".","info","(","\"Initialize PyTorch weight {}\"",".","format","(","name",")",")","pointer",".","data","=","torch",".","from_numpy","(","array",")","tf_weights",".","pop","(","name",",","None",")","tf_weights",".","pop","(","name","+","'\/Adam'",",","None",")","tf_weights",".","pop","(","name","+","'\/Adam_1'",",","None",")","logger",".","info","(","\"Weights not copied to PyTorch model: {}\"",".","format","(","', '",".","join","(","tf_weights",".","keys","(",")",")",")",")","return","model"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl.py#L120-L173"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl.py","language":"python","identifier":"TransfoXLPreTrainedModel._init_weights","parameters":"(self, m)","argument_list":"","return_statement":"","docstring":"Initialize the weights.","docstring_summary":"Initialize the weights.","docstring_tokens":["Initialize","the","weights","."],"function":"def _init_weights(self, m):\n \"\"\" Initialize the weights.\n \"\"\"\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n if hasattr(m, 'weight') and m.weight is not None:\n self._init_weight(m.weight)\n if hasattr(m, 'bias') and m.bias is not None:\n self._init_bias(m.bias)\n elif classname.find('AdaptiveEmbedding') != -1:\n if hasattr(m, 'emb_projs'):\n for i in range(len(m.emb_projs)):\n if m.emb_projs[i] is not None:\n nn.init.normal_(m.emb_projs[i], 0.0, 
self.config.proj_init_std)\n elif classname.find('Embedding') != -1:\n if hasattr(m, 'weight'):\n self._init_weight(m.weight)\n elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:\n if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:\n self._init_weight(m.cluster_weight)\n if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:\n self._init_bias(m.cluster_bias)\n if hasattr(m, 'out_projs'):\n for i in range(len(m.out_projs)):\n if m.out_projs[i] is not None:\n nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)\n elif classname.find('LayerNorm') != -1:\n if hasattr(m, 'weight'):\n nn.init.normal_(m.weight, 1.0, self.config.init_std)\n if hasattr(m, 'bias') and m.bias is not None:\n self._init_bias(m.bias)\n else:\n if hasattr(m, 'r_emb'):\n self._init_weight(m.r_emb)\n if hasattr(m, 'r_w_bias'):\n self._init_weight(m.r_w_bias)\n if hasattr(m, 'r_r_bias'):\n self._init_weight(m.r_r_bias)\n if hasattr(m, 'r_bias'):\n self._init_bias(m.r_bias)","function_tokens":["def","_init_weights","(","self",",","m",")",":","classname","=","m",".","__class__",".","__name__","if","classname",".","find","(","'Linear'",")","!=","-","1",":","if","hasattr","(","m",",","'weight'",")","and","m",".","weight","is","not","None",":","self",".","_init_weight","(","m",".","weight",")","if","hasattr","(","m",",","'bias'",")","and","m",".","bias","is","not","None",":","self",".","_init_bias","(","m",".","bias",")","elif","classname",".","find","(","'AdaptiveEmbedding'",")","!=","-","1",":","if","hasattr","(","m",",","'emb_projs'",")",":","for","i","in","range","(","len","(","m",".","emb_projs",")",")",":","if","m",".","emb_projs","[","i","]","is","not","None",":","nn",".","init",".","normal_","(","m",".","emb_projs","[","i","]",",","0.0",",","self",".","config",".","proj_init_std",")","elif","classname",".","find","(","'Embedding'",")","!=","-","1",":","if","hasattr","(","m",",","'weight'",")",":","self",".","_init_weight","(","m",".","weight",")","elif","classname",".","find","(","'ProjectedAdaptiveLogSoftmax'",")","!=","-","1",":","if","hasattr","(","m",",","'cluster_weight'",")","and","m",".","cluster_weight","is","not","None",":","self",".","_init_weight","(","m",".","cluster_weight",")","if","hasattr","(","m",",","'cluster_bias'",")","and","m",".","cluster_bias","is","not","None",":","self",".","_init_bias","(","m",".","cluster_bias",")","if","hasattr","(","m",",","'out_projs'",")",":","for","i","in","range","(","len","(","m",".","out_projs",")",")",":","if","m",".","out_projs","[","i","]","is","not","None",":","nn",".","init",".","normal_","(","m",".","out_projs","[","i","]",",","0.0",",","self",".","config",".","proj_init_std",")","elif","classname",".","find","(","'LayerNorm'",")","!=","-","1",":","if","hasattr","(","m",",","'weight'",")",":","nn",".","init",".","normal_","(","m",".","weight",",","1.0",",","self",".","config",".","init_std",")","if","hasattr","(","m",",","'bias'",")","and","m",".","bias","is","not","None",":","self",".","_init_bias","(","m",".","bias",")","else",":","if","hasattr","(","m",",","'r_emb'",")",":","self",".","_init_weight","(","m",".","r_emb",")","if","hasattr","(","m",",","'r_w_bias'",")",":","self",".","_init_weight","(","m",".","r_w_bias",")","if","hasattr","(","m",",","'r_r_bias'",")",":","self",".","_init_weight","(","m",".","r_r_bias",")","if","hasattr","(","m",",","'r_bias'",")",":","self",".","_init_bias","(","m",".","r_bias",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/
models_pytorch\/classifier_pytorch\/transformers\/modeling_transfo_xl.py#L475-L514"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"collate_fn","parameters":"(batch)","argument_list":"","return_statement":"return all_input_ids, all_attention_mask, all_token_type_ids, all_labels","docstring":"batch should be a list of (sequence, target, length) tuples...\n Returns padded tensors truncated to the longest sequence length in the batch.","docstring_summary":"batch should be a list of (sequence, target, length) tuples...\n Returns padded tensors truncated to the longest sequence length in the batch.","docstring_tokens":["batch","should","be","a","list","of","(","sequence","target","length",")","tuples","...","Returns","padded","tensors","truncated","to","the","longest","sequence","length","in","the","batch"],"function":"def collate_fn(batch):\n \"\"\"\n batch should be a list of (sequence, target, length) tuples...\n Returns padded tensors truncated to the longest sequence length in the batch.\n \"\"\"\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, :max_len]\n all_attention_mask = all_attention_mask[:, :max_len]\n all_token_type_ids = all_token_type_ids[:, :max_len]\n return all_input_ids, all_attention_mask, all_token_type_ids, all_labels","function_tokens":["def","collate_fn","(","batch",")",":","all_input_ids",",","all_attention_mask",",","all_token_type_ids",",","all_lens",",","all_labels","=","map","(","torch",".","stack",",","zip","(","*","batch",")",")","max_len","=","max","(","all_lens",")",".","item","(",")","all_input_ids","=","all_input_ids","[",":",",",":","max_len","]","all_attention_mask","=","all_attention_mask","[",":",",",":","max_len","]","all_token_type_ids","=","all_token_type_ids","[",":",",",":","max_len","]","return","all_input_ids",",","all_attention_mask",",","all_token_type_ids",",","all_labels"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L16-L26"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"xlnet_collate_fn","parameters":"(batch)","argument_list":"","return_statement":"return all_input_ids, all_attention_mask, all_token_type_ids, all_labels","docstring":"batch should be a list of (sequence, target, length) tuples...\n Returns padded tensors truncated to the longest sequence length in the batch.","docstring_summary":"batch should be a list of (sequence, target, length) tuples...\n Returns padded tensors truncated to the longest sequence length in the batch.","docstring_tokens":["batch","should","be","a","list","of","(","sequence","target","length",")","tuples","...","Returns","padded","tensors","truncated","to","the","longest","sequence","length","in","the","batch"],"function":"def xlnet_collate_fn(batch):\n \"\"\"\n batch should be a list of (sequence, target, length) tuples...\n Returns padded tensors truncated to the longest sequence length in the batch.\n \"\"\"\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, -max_len:]\n all_attention_mask = all_attention_mask[:, -max_len:]\n 
all_token_type_ids = all_token_type_ids[:, -max_len:]\n return all_input_ids, all_attention_mask, all_token_type_ids, all_labels","function_tokens":["def","xlnet_collate_fn","(","batch",")",":","all_input_ids",",","all_attention_mask",",","all_token_type_ids",",","all_lens",",","all_labels","=","map","(","torch",".","stack",",","zip","(","*","batch",")",")","max_len","=","max","(","all_lens",")",".","item","(",")","all_input_ids","=","all_input_ids","[",":",",","-","max_len",":","]","all_attention_mask","=","all_attention_mask","[",":",",","-","max_len",":","]","all_token_type_ids","=","all_token_type_ids","[",":",",","-","max_len",":","]","return","all_input_ids",",","all_attention_mask",",","all_token_type_ids",",","all_labels"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L29-L39"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"clue_convert_examples_to_features","parameters":"(examples, tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True)","argument_list":"","return_statement":"return features","docstring":"Loads a data file into a list of ``InputFeatures``\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: CLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n\n Returns:\n If the input is a list of ``InputExamples``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.","docstring_summary":"Loads a data file into a list of ``InputFeatures``\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: CLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. 
If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)","docstring_tokens":["Loads","a","data","file","into","a","list","of","InputFeatures","Args",":","examples",":","List","of","InputExamples","or","tf",".","data",".","Dataset","containing","the","examples",".","tokenizer",":","Instance","of","a","tokenizer","that","will","tokenize","the","examples","max_length",":","Maximum","example","length","task",":","CLUE","task","label_list",":","List","of","labels",".","Can","be","obtained","from","the","processor","using","the","processor",".","get_labels","()","method","output_mode",":","String","indicating","the","output","mode",".","Either","regression","or","classification","pad_on_left",":","If","set","to","True","the","examples","will","be","padded","on","the","left","rather","than","on","the","right","(","default",")","pad_token",":","Padding","token","pad_token_segment_id",":","The","segment","ID","for","the","padding","token","(","It","is","usually","0","but","can","vary","such","as","for","XLNet","where","it","is","4",")","mask_padding_with_zero",":","If","set","to","True","the","attention","mask","will","be","filled","by","1","for","actual","values","and","by","0","for","padded","values",".","If","set","to","False","inverts","it","(","1","for","padded","values","0","for","actual","values",")"],"function":"def clue_convert_examples_to_features(examples, tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True):\n \"\"\"\n Loads a data file into a list of ``InputFeatures``\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: CLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n\n Returns:\n If the input is a list of ``InputExamples``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.\n\n \"\"\"\n if task is not None:\n processor = clue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = clue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d\" % (ex_index))\n\n inputs = tokenizer.encode_plus(\n example.text_a,\n example.text_b,\n add_special_tokens=True,\n max_length=max_length\n )\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n input_len = len(input_ids)\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(len(attention_mask),\n max_length)\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(len(token_type_ids),\n max_length)\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n logger.info(\"input length: %d\" % (input_len))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label=label,\n input_len=input_len))\n return features","function_tokens":["def","clue_convert_examples_to_features","(","examples",",","tokenizer",",","max_length","=","512",",","task","=","None",",","label_list","=","None",",","output_mode","=","None",",","pad_on_left","=","False",",","pad_token","=","0",",","pad_token_segment_id","=","0",",","mask_padding_with_zero","=","True",")",":","if","task","is","not","None",":","processor","=","clue_processors","[","task","]","(",")","if","label_list","is","None",":","label_list","=","processor",".","get_labels","(",")","logger",".","info","(","\"Using label list %s for task %s\"","%","(","label_list",",","task",")",")","if","output_mode","is","None",":","output_mode","=","clue_output_modes","[","task","]","logger",".","info","(","\"Using output mode %s for task %s\"","%","(","output_mode",",","task",")",")","label_map","=","{","label",":","i","for","i",",","label","in","enumerate","(","label_list",")","}","features","=","[","]","for","(","ex_index",",","example",")","in","enumerate","(","examples",")",":","if","ex_index","%","10000","==","0",":","logger",".","info","(","\"Writing example %d\"","%","(","ex_index",")",")","inputs","=","tokenizer",".","encode_plus","(","example",".","text_a",",","example",".","text_b",",","add_special_tokens","=","True",",","max_length","=","max_length",")","input_ids",",","token_type_ids","=","inputs","[","\"input_ids\"","]",",","inputs","[","\"token_type_ids\"","]","# The mask has 1 for real tokens and 0 for padding tokens. 
Only real","# tokens are attended to.","attention_mask","=","[","1","if","mask_padding_with_zero","else","0","]","*","len","(","input_ids",")","input_len","=","len","(","input_ids",")","# Zero-pad up to the sequence length.","padding_length","=","max_length","-","len","(","input_ids",")","if","pad_on_left",":","input_ids","=","(","[","pad_token","]","*","padding_length",")","+","input_ids","attention_mask","=","(","[","0","if","mask_padding_with_zero","else","1","]","*","padding_length",")","+","attention_mask","token_type_ids","=","(","[","pad_token_segment_id","]","*","padding_length",")","+","token_type_ids","else",":","input_ids","=","input_ids","+","(","[","pad_token","]","*","padding_length",")","attention_mask","=","attention_mask","+","(","[","0","if","mask_padding_with_zero","else","1","]","*","padding_length",")","token_type_ids","=","token_type_ids","+","(","[","pad_token_segment_id","]","*","padding_length",")","assert","len","(","input_ids",")","==","max_length",",","\"Error with input length {} vs {}\"",".","format","(","len","(","input_ids",")",",","max_length",")","assert","len","(","attention_mask",")","==","max_length",",","\"Error with input length {} vs {}\"",".","format","(","len","(","attention_mask",")",",","max_length",")","assert","len","(","token_type_ids",")","==","max_length",",","\"Error with input length {} vs {}\"",".","format","(","len","(","token_type_ids",")",",","max_length",")","if","output_mode","==","\"classification\"",":","label","=","label_map","[","example",".","label","]","elif","output_mode","==","\"regression\"",":","label","=","float","(","example",".","label",")","else",":","raise","KeyError","(","output_mode",")","if","ex_index","<","5",":","logger",".","info","(","\"*** Example ***\"",")","logger",".","info","(","\"guid: %s\"","%","(","example",".","guid",")",")","logger",".","info","(","\"input_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","input_ids","]",")",")","logger",".","info","(","\"attention_mask: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","attention_mask","]",")",")","logger",".","info","(","\"token_type_ids: %s\"","%","\" \"",".","join","(","[","str","(","x",")","for","x","in","token_type_ids","]",")",")","logger",".","info","(","\"label: %s (id = %d)\"","%","(","example",".","label",",","label",")",")","logger",".","info","(","\"input length: %d\"","%","(","input_len",")",")","features",".","append","(","InputFeatures","(","input_ids","=","input_ids",",","attention_mask","=","attention_mask",",","token_type_ids","=","token_type_ids",",","label","=","label",",","input_len","=","input_len",")",")","return","features"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L42-L138"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"TnewsProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), 
\"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L144-L147"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"TnewsProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L149-L152"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"TnewsProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L154-L157"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"TnewsProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return labels","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n labels = []\n for i in range(17):\n if i == 5 or i == 11:\n continue\n labels.append(str(100 + i))\n return 
labels","function_tokens":["def","get_labels","(","self",")",":","labels","=","[","]","for","i","in","range","(","17",")",":","if","i","==","5","or","i","==","11",":","continue","labels",".","append","(","str","(","100","+","i",")",")","return","labels"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L159-L166"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"TnewsProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line['sentence']\n text_b = None\n label = str(line['label']) if set_type != 'test' else \"100\"\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","line","[","'sentence'","]","text_b","=","None","label","=","str","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","\"100\"","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L168-L178"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"IflytekProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L184-L187"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"IflytekProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return 
self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L189-L192"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"IflytekProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L194-L197"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"IflytekProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return labels","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n labels = []\n for i in range(119):\n labels.append(str(i))\n return labels","function_tokens":["def","get_labels","(","self",")",":","labels","=","[","]","for","i","in","range","(","119",")",":","labels",".","append","(","str","(","i",")",")","return","labels"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L199-L204"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"IflytekProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % 
(set_type, i)\n text_a = line['sentence']\n text_b = None\n label = str(line['label']) if set_type != 'test' else \"0\"\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","line","[","'sentence'","]","text_b","=","None","label","=","str","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","\"0\"","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L206-L216"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"AfqmcProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L222-L225"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"AfqmcProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L227-L230"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"AfqmcProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, 
\"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L232-L235"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"AfqmcProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"0\", \"1\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"0\"",",","\"1\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L237-L239"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"AfqmcProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line['sentence1']\n text_b = line['sentence2']\n label = str(line['label']) if set_type != 'test' else \"0\"\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","line","[","'sentence1'","]","text_b","=","line","[","'sentence2'","]","label","=","str","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","\"0\"","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L241-L251"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"OcnliProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return 
self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L256-L259"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"OcnliProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L261-L264"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"OcnliProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L266-L269"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"OcnliProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"contradiction\", \"entailment\", \"neutral\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", 
\"entailment\", \"neutral\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"contradiction\"",",","\"entailment\"",",","\"neutral\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L271-L273"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"OcnliProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[\"sentence1\"]\n text_b = line[\"sentence2\"]\n label = str(line[\"label\"]) if set_type != 'test' else 'neutral'\n if label.strip()=='-':\n continue\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","line","[","\"sentence1\"","]","text_b","=","line","[","\"sentence2\"","]","label","=","str","(","line","[","\"label\"","]",")","if","set_type","!=","'test'","else","'neutral'","if","label",".","strip","(",")","==","'-'",":","continue","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L275-L287"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CmnliProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L292-L295"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CmnliProcessor.get_dev_examples","parameters":"(self, 
data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L297-L300"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CmnliProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L302-L305"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CmnliProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"contradiction\", \"entailment\", \"neutral\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"contradiction\"",",","\"entailment\"",",","\"neutral\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L307-L309"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CmnliProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid 
= \"%s-%s\" % (set_type, i)\n text_a = line[\"sentence1\"]\n text_b = line[\"sentence2\"]\n label = str(line[\"label\"]) if set_type != 'test' else 'neutral'\n if label.strip()=='-':\n continue\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","line","[","\"sentence1\"","]","text_b","=","line","[","\"sentence2\"","]","label","=","str","(","line","[","\"label\"","]",")","if","set_type","!=","'test'","else","'neutral'","if","label",".","strip","(",")","==","'-'",":","continue","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L311-L323"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CslProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L329-L332"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CslProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L334-L337"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CslProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L339-L342"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CslProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"0\", \"1\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"0\"",",","\"1\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L344-L346"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CslProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = \" \".join(line['keyword'])\n text_b = line['abst']\n label = str(line['label']) if set_type != 'test' else '0'\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","\" 
\"",".","join","(","line","[","'keyword'","]",")","text_b","=","line","[","'abst'","]","label","=","str","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","'0'","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L348-L358"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"WscProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L364-L367"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"WscProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L369-L372"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"WscProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), 
\"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L374-L377"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"WscProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"true\", \"false\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"true\", \"false\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"true\"",",","\"false\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L379-L381"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"WscProcessor._create_examples","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line['text']\n text_a_list = list(text_a)\n target = line['target']\n query = target['span1_text']\n query_idx = target['span1_index']\n pronoun = target['span2_text']\n pronoun_idx = target['span2_index']\n assert text_a[pronoun_idx: (pronoun_idx + len(pronoun))] == pronoun, \"pronoun: {}\".format(pronoun)\n assert text_a[query_idx: (query_idx + len(query))] == query, \"query: {}\".format(query)\n if pronoun_idx > query_idx:\n text_a_list.insert(query_idx, \"_\")\n text_a_list.insert(query_idx + len(query) + 1, \"_\")\n text_a_list.insert(pronoun_idx + 2, \"[\")\n text_a_list.insert(pronoun_idx + len(pronoun) + 2 + 1, \"]\")\n else:\n text_a_list.insert(pronoun_idx, \"[\")\n text_a_list.insert(pronoun_idx + len(pronoun) + 1, \"]\")\n text_a_list.insert(query_idx + 2, \"_\")\n text_a_list.insert(query_idx + len(query) + 2 + 1, \"_\")\n text_a = \"\".join(text_a_list)\n text_b = None\n label = str(line['label']) if set_type != 'test' else 'true'\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return 
examples","function_tokens":["def","_create_examples","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","text_a","=","line","[","'text'","]","text_a_list","=","list","(","text_a",")","target","=","line","[","'target'","]","query","=","target","[","'span1_text'","]","query_idx","=","target","[","'span1_index'","]","pronoun","=","target","[","'span2_text'","]","pronoun_idx","=","target","[","'span2_index'","]","assert","text_a","[","pronoun_idx",":","(","pronoun_idx","+","len","(","pronoun",")",")","]","==","pronoun",",","\"pronoun: {}\"",".","format","(","pronoun",")","assert","text_a","[","query_idx",":","(","query_idx","+","len","(","query",")",")","]","==","query",",","\"query: {}\"",".","format","(","query",")","if","pronoun_idx",">","query_idx",":","text_a_list",".","insert","(","query_idx",",","\"_\"",")","text_a_list",".","insert","(","query_idx","+","len","(","query",")","+","1",",","\"_\"",")","text_a_list",".","insert","(","pronoun_idx","+","2",",","\"[\"",")","text_a_list",".","insert","(","pronoun_idx","+","len","(","pronoun",")","+","2","+","1",",","\"]\"",")","else",":","text_a_list",".","insert","(","pronoun_idx",",","\"[\"",")","text_a_list",".","insert","(","pronoun_idx","+","len","(","pronoun",")","+","1",",","\"]\"",")","text_a_list",".","insert","(","query_idx","+","2",",","\"_\"",")","text_a_list",".","insert","(","query_idx","+","len","(","query",")","+","2","+","1",",","\"_\"",")","text_a","=","\"\"",".","join","(","text_a_list",")","text_b","=","None","label","=","str","(","line","[","'label'","]",")","if","set_type","!=","'test'","else","'true'","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L383-L412"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CopaProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"train.json\")), \"train\")","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"train.json\"",")",")",",","\"train\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L418-L421"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CopaProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n 
self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"dev.json\")), \"dev\")","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"dev.json\"",")",")",",","\"dev\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L423-L426"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CopaProcessor.get_test_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_json(os.path.join(data_dir, \"test.json\")), \"test\")","function_tokens":["def","get_test_examples","(","self",",","data_dir",")",":","return","self",".","_create_examples","(","self",".","_read_json","(","os",".","path",".","join","(","data_dir",",","\"test.json\"",")",")",",","\"test\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L428-L431"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CopaProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"return [\"0\", \"1\"]","docstring":"See base class.","docstring_summary":"See base class.","docstring_tokens":["See","base","class","."],"function":"def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]","function_tokens":["def","get_labels","(","self",")",":","return","[","\"0\"",",","\"1\"","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L433-L435"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py","language":"python","identifier":"CopaProcessor._create_examples_version2","parameters":"(self, lines, set_type)","argument_list":"","return_statement":"return examples","docstring":"Creates examples for the training and dev sets.","docstring_summary":"Creates examples for the training and dev sets.","docstring_tokens":["Creates","examples","for","the","training","and","dev","sets","."],"function":"def _create_examples_version2(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n if line['question'] == 'cause':\n text_a = line['premise'] + '\u8fd9\u662f\u4ec0\u4e48\u539f\u56e0\u9020\u6210\u7684\uff1f' + line['choice0']\n 
text_b = line['premise'] + '\u8fd9\u662f\u4ec0\u4e48\u539f\u56e0\u9020\u6210\u7684\uff1f' + line['choice1']\n else:\n text_a = line['premise'] + '\u8fd9\u9020\u6210\u4e86\u4ec0\u4e48\u5f71\u54cd\uff1f' + line['choice0']\n text_b = line['premise'] + '\u8fd9\u9020\u6210\u4e86\u4ec0\u4e48\u5f71\u54cd\uff1f' + line['choice1']\n label = str(1 if line['label'] == 0 else 0) if set_type != 'test' else '0'\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples","function_tokens":["def","_create_examples_version2","(","self",",","lines",",","set_type",")",":","examples","=","[","]","for","(","i",",","line",")","in","enumerate","(","lines",")",":","guid","=","\"%s-%s\"","%","(","set_type",",","i",")","if","line","[","'question'","]","==","'cause'",":","text_a","=","line","[","'premise'","]","+","'\u8fd9\u662f\u4ec0\u4e48\u539f\u56e0\u9020\u6210\u7684\uff1f' + line['choice0']","","","","","","text_b","=","line","[","'premise'","]","+","'\u8fd9\u662f\u4ec0\u4e48\u539f\u56e0\u9020\u6210\u7684\uff1f' + line['choice1']","","","","","","else",":","text_a","=","line","[","'premise'","]","+","'\u8fd9\u9020\u6210\u4e86\u4ec0\u4e48\u5f71\u54cd\uff1f' + line['choice0']","","","","","","text_b","=","line","[","'premise'","]","+","'\u8fd9\u9020\u6210\u4e86\u4ec0\u4e48\u5f71\u54cd\uff1f' + line['choice1']","","","","","","label","=","str","(","1","if","line","[","'label'","]","==","0","else","0",")","if","set_type","!=","'test'","else","'0'","examples",".","append","(","InputExample","(","guid","=","guid",",","text_a","=","text_a",",","text_b","=","text_b",",","label","=","label",")",")","return","examples"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/clue.py#L466-L480"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"InputExample.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L28-L31"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"InputExample.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + 
\"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L33-L35"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"InputFeatures.to_dict","parameters":"(self)","argument_list":"","return_statement":"return output","docstring":"Serializes this instance to a Python dictionary.","docstring_summary":"Serializes this instance to a Python dictionary.","docstring_tokens":["Serializes","this","instance","to","a","Python","dictionary","."],"function":"def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output","function_tokens":["def","to_dict","(","self",")",":","output","=","copy",".","deepcopy","(","self",".","__dict__",")","return","output"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L61-L64"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"InputFeatures.to_json_string","parameters":"(self)","argument_list":"","return_statement":"return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","docstring":"Serializes this instance to a JSON string.","docstring_summary":"Serializes this instance to a JSON string.","docstring_tokens":["Serializes","this","instance","to","a","JSON","string","."],"function":"def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"","function_tokens":["def","to_json_string","(","self",")",":","return","json",".","dumps","(","self",".","to_dict","(",")",",","indent","=","2",",","sort_keys","=","True",")","+","\"\\n\""],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L66-L68"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"DataProcessor.get_train_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the train set.","docstring_summary":"Gets a collection of `InputExample`s for the train set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","train","set","."],"function":"def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_train_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L74-L76"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"DataProcessor.get_dev_examples","parameters":"(self, data_dir)","argument_list":"","return_statement":"","docstring":"Gets a collection of `InputExample`s for the dev set.","docstring_summary":"Gets a collection of `InputExample`s for the dev set.","docstring_tokens":["Gets","a","collection","of","InputExample","s","for","the","dev","set","."],"function":"def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_dev_examples","(","self",",","data_dir",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L78-L80"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"DataProcessor.get_labels","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Gets the list of labels for this data set.","docstring_summary":"Gets the list of labels for this data set.","docstring_tokens":["Gets","the","list","of","labels","for","this","data","set","."],"function":"def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()","function_tokens":["def","get_labels","(","self",")",":","raise","NotImplementedError","(",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L82-L84"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"DataProcessor._read_tsv","parameters":"(cls, input_file, quotechar=None)","argument_list":"","return_statement":"","docstring":"Reads a tab separated value file.","docstring_summary":"Reads a tab separated value file.","docstring_tokens":["Reads","a","tab","separated","value","file","."],"function":"def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines","function_tokens":["def","_read_tsv","(","cls",",","input_file",",","quotechar","=","None",")",":","with","open","(","input_file",",","\"r\"",",","encoding","=","\"utf-8-sig\"",")","as","f",":","reader","=","csv",".","reader","(","f",",","delimiter","=","\"\\t\"",",","quotechar","=","quotechar",")","lines","=","[","]","for","line","in","reader",":","lines",".","append","(","line",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L87-L94"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py","language":"python","identifier":"DataProcessor._read_json","parameters":"(cls, input_file)","argument_list":"","return_statement":"","docstring":"Reads a json list 
file.","docstring_summary":"Reads a json list file.","docstring_tokens":["Reads","a","json","list","file","."],"function":"def _read_json(cls, input_file):\n \"\"\"Reads a json list file.\"\"\"\n with open(input_file, \"r\") as f:\n reader = f.readlines()\n lines = []\n for line in reader:\n lines.append(json.loads(line.strip()))\n return lines","function_tokens":["def","_read_json","(","cls",",","input_file",")",":","with","open","(","input_file",",","\"r\"",")","as","f",":","reader","=","f",".","readlines","(",")","lines","=","[","]","for","line","in","reader",":","lines",".","append","(","json",".","loads","(","line",".","strip","(",")",")",")","return","lines"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/processors\/utils.py#L97-L104"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"init_logger","parameters":"(log_file=None, log_file_level=logging.NOTSET)","argument_list":"","return_statement":"return logger","docstring":"Example:\n >>> init_logger(log_file)\n >>> logger.info(\"abc'\")","docstring_summary":"Example:\n >>> init_logger(log_file)\n >>> logger.info(\"abc'\")","docstring_tokens":["Example",":",">>>","init_logger","(","log_file",")",">>>","logger",".","info","(","abc",")"],"function":"def init_logger(log_file=None, log_file_level=logging.NOTSET):\n '''\n Example:\n >>> init_logger(log_file)\n >>> logger.info(\"abc'\")\n '''\n if isinstance(log_file,Path):\n log_file = str(log_file)\n log_format = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m\/%d\/%Y %H:%M:%S')\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_format)\n logger.handlers = [console_handler]\n if log_file and log_file != '':\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(log_file_level)\n # file_handler.setFormatter(log_format)\n logger.addHandler(file_handler)\n return logger","function_tokens":["def","init_logger","(","log_file","=","None",",","log_file_level","=","logging",".","NOTSET",")",":","if","isinstance","(","log_file",",","Path",")",":","log_file","=","str","(","log_file",")","log_format","=","logging",".","Formatter","(","fmt","=","'%(asctime)s - %(levelname)s - %(name)s - %(message)s'",",","datefmt","=","'%m\/%d\/%Y %H:%M:%S'",")","logger","=","logging",".","getLogger","(",")","logger",".","setLevel","(","logging",".","INFO",")","console_handler","=","logging",".","StreamHandler","(",")","console_handler",".","setFormatter","(","log_format",")","logger",".","handlers","=","[","console_handler","]","if","log_file","and","log_file","!=","''",":","file_handler","=","logging",".","FileHandler","(","log_file",")","file_handler",".","setLevel","(","log_file_level",")","# file_handler.setFormatter(log_format)","logger",".","addHandler","(","file_handler",")","return","logger"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L20-L41"} 
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"seed_everything","parameters":"(seed=1029)","argument_list":"","return_statement":"","docstring":"\u8bbe\u7f6e\u6574\u4e2a\u5f00\u53d1\u73af\u5883\u7684seed\n :param seed:\n :param device:\n :return:","docstring_summary":"\u8bbe\u7f6e\u6574\u4e2a\u5f00\u53d1\u73af\u5883\u7684seed\n :param seed:\n :param device:\n :return:","docstring_tokens":["\u8bbe\u7f6e\u6574\u4e2a\u5f00\u53d1\u73af\u5883\u7684seed",":","param","seed",":",":","param","device",":",":","return",":"],"function":"def seed_everything(seed=1029):\n '''\n \u8bbe\u7f6e\u6574\u4e2a\u5f00\u53d1\u73af\u5883\u7684seed\n :param seed:\n :param device:\n :return:\n '''\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # some cudnn methods can be random even after fixing the seed\n # unless you tell it to be deterministic\n torch.backends.cudnn.deterministic = True","function_tokens":["def","seed_everything","(","seed","=","1029",")",":","random",".","seed","(","seed",")","os",".","environ","[","'PYTHONHASHSEED'","]","=","str","(","seed",")","np",".","random",".","seed","(","seed",")","torch",".","manual_seed","(","seed",")","torch",".","cuda",".","manual_seed","(","seed",")","torch",".","cuda",".","manual_seed_all","(","seed",")","# some cudnn methods can be random even after fixing the seed","# unless you tell it to be deterministic","torch",".","backends",".","cudnn",".","deterministic","=","True"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L43-L58"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"prepare_device","parameters":"(n_gpu_use)","argument_list":"","return_statement":"return device, list_ids","docstring":"setup GPU device if available, move model into configured device\n # \u5982\u679cn_gpu_use\u4e3a\u6570\u5b57\uff0c\u5219\u4f7f\u7528range\u751f\u6210list\n # \u5982\u679c\u8f93\u5165\u7684\u662f\u4e00\u4e2alist\uff0c\u5219\u9ed8\u8ba4\u4f7f\u7528list[0]\u4f5c\u4e3acontroller","docstring_summary":"setup GPU device if available, move model into configured device\n # \u5982\u679cn_gpu_use\u4e3a\u6570\u5b57\uff0c\u5219\u4f7f\u7528range\u751f\u6210list\n # \u5982\u679c\u8f93\u5165\u7684\u662f\u4e00\u4e2alist\uff0c\u5219\u9ed8\u8ba4\u4f7f\u7528list[0]\u4f5c\u4e3acontroller","docstring_tokens":["setup","GPU","device","if","available","move","model","into","configured","device","#","\u5982\u679cn_gpu_use\u4e3a\u6570\u5b57\uff0c\u5219\u4f7f\u7528range\u751f\u6210list","#","\u5982\u679c\u8f93\u5165\u7684\u662f\u4e00\u4e2alist\uff0c\u5219\u9ed8\u8ba4\u4f7f\u7528list","[","0","]","\u4f5c\u4e3acontroller"],"function":"def prepare_device(n_gpu_use):\n \"\"\"\n setup GPU device if available, move model into configured device\n # \u5982\u679cn_gpu_use\u4e3a\u6570\u5b57\uff0c\u5219\u4f7f\u7528range\u751f\u6210list\n # \u5982\u679c\u8f93\u5165\u7684\u662f\u4e00\u4e2alist\uff0c\u5219\u9ed8\u8ba4\u4f7f\u7528list[0]\u4f5c\u4e3acontroller\n \"\"\"\n if not n_gpu_use:\n device_type = 'cpu'\n else:\n n_gpu_use = n_gpu_use.split(\",\")\n device_type = f\"cuda:{n_gpu_use[0]}\"\n n_gpu = 
torch.cuda.device_count()\n if len(n_gpu_use) > 0 and n_gpu == 0:\n logger.warning(\"Warning: There\\'s no GPU available on this machine, training will be performed on CPU.\")\n device_type = 'cpu'\n if len(n_gpu_use) > n_gpu:\n msg = f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are available on this machine.\"\n logger.warning(msg)\n n_gpu_use = range(n_gpu)\n device = torch.device(device_type)\n list_ids = n_gpu_use\n return device, list_ids","function_tokens":["def","prepare_device","(","n_gpu_use",")",":","if","not","n_gpu_use",":","device_type","=","'cpu'","else",":","n_gpu_use","=","n_gpu_use",".","split","(","\",\"",")","device_type","=","f\"cuda:{n_gpu_use[0]}\"","n_gpu","=","torch",".","cuda",".","device_count","(",")","if","len","(","n_gpu_use",")",">","0","and","n_gpu","==","0",":","logger",".","warning","(","\"Warning: There\\'s no GPU available on this machine, training will be performed on CPU.\"",")","device_type","=","'cpu'","if","len","(","n_gpu_use",")",">","n_gpu",":","msg","=","f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are available on this machine.\"","logger",".","warning","(","msg",")","n_gpu_use","=","range","(","n_gpu",")","device","=","torch",".","device","(","device_type",")","list_ids","=","n_gpu_use","return","device",",","list_ids"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L61-L82"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"model_device","parameters":"(n_gpu, model)","argument_list":"","return_statement":"return model, device","docstring":"\u5224\u65ad\u73af\u5883 cpu\u8fd8\u662fgpu\n \u652f\u6301\u5355\u673a\u591a\u5361\n :param n_gpu:\n :param model:\n :return:","docstring_summary":"\u5224\u65ad\u73af\u5883 cpu\u8fd8\u662fgpu\n \u652f\u6301\u5355\u673a\u591a\u5361\n :param n_gpu:\n :param model:\n :return:","docstring_tokens":["\u5224\u65ad\u73af\u5883","cpu\u8fd8\u662fgpu","\u652f\u6301\u5355\u673a\u591a\u5361",":","param","n_gpu",":",":","param","model",":",":","return",":"],"function":"def model_device(n_gpu, model):\n '''\n \u5224\u65ad\u73af\u5883 cpu\u8fd8\u662fgpu\n \u652f\u6301\u5355\u673a\u591a\u5361\n :param n_gpu:\n :param model:\n :return:\n '''\n device, device_ids = prepare_device(n_gpu)\n if len(device_ids) > 1:\n logger.info(f\"current {len(device_ids)} GPUs\")\n model = torch.nn.DataParallel(model, device_ids=device_ids)\n if len(device_ids) == 1:\n os.environ['CUDA_VISIBLE_DEVICES'] = str(device_ids[0])\n model = model.to(device)\n return model, device","function_tokens":["def","model_device","(","n_gpu",",","model",")",":","device",",","device_ids","=","prepare_device","(","n_gpu",")","if","len","(","device_ids",")",">","1",":","logger",".","info","(","f\"current {len(device_ids)} GPUs\"",")","model","=","torch",".","nn",".","DataParallel","(","model",",","device_ids","=","device_ids",")","if","len","(","device_ids",")","==","1",":","os",".","environ","[","'CUDA_VISIBLE_DEVICES'","]","=","str","(","device_ids","[","0","]",")","model","=","model",".","to","(","device",")","return","model",",","device"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L85-L100"} 
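prepare_device above parses a comma-separated GPU id string (e.g. "0,1"), falls back to CPU when no GPU is visible, and model_device then moves the model and wraps it in torch.nn.DataParallel when more than one id is requested. A condensed sketch of the selection logic, assuming PyTorch is installed; pick_device is a hypothetical name and this omits the processor's warning messages:

import torch

def pick_device(gpu_ids: str):
    # "" -> CPU; "0" -> cuda:0; "0,1" -> cuda:0 with DataParallel ids [0, 1].
    if not gpu_ids:
        return torch.device("cpu"), []
    ids = [int(i) for i in gpu_ids.split(",")]
    if torch.cuda.device_count() == 0:
        # GPUs were requested but none are available on this machine.
        return torch.device("cpu"), []
    return torch.device(f"cuda:{ids[0]}"), ids

device, ids = pick_device("")  # on a CPU-only machine: (device('cpu'), [])
print(device, ids)

As in model_device, a caller would then do model.to(device) and, when len(ids) > 1, torch.nn.DataParallel(model, device_ids=ids).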
{"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"restore_checkpoint","parameters":"(resume_path, model=None)","argument_list":"","return_statement":"return [model,best,start_epoch]","docstring":"\u52a0\u8f7d\u6a21\u578b\n :param resume_path:\n :param model:\n :param optimizer:\n :return:\n \u6ce8\u610f\uff1a \u5982\u679c\u662f\u52a0\u8f7dBert\u6a21\u578b\u7684\u8bdd\uff0c\u9700\u8981\u8c03\u6574\uff0c\u4e0d\u80fd\u4f7f\u7528\u8be5\u6a21\u5f0f\n \u53ef\u4ee5\u4f7f\u7528\u6a21\u5757\u81ea\u5e26\u7684Bert_model.from_pretrained(state_dict = your save state_dict)","docstring_summary":"\u52a0\u8f7d\u6a21\u578b\n :param resume_path:\n :param model:\n :param optimizer:\n :return:\n \u6ce8\u610f\uff1a \u5982\u679c\u662f\u52a0\u8f7dBert\u6a21\u578b\u7684\u8bdd\uff0c\u9700\u8981\u8c03\u6574\uff0c\u4e0d\u80fd\u4f7f\u7528\u8be5\u6a21\u5f0f\n \u53ef\u4ee5\u4f7f\u7528\u6a21\u5757\u81ea\u5e26\u7684Bert_model.from_pretrained(state_dict = your save state_dict)","docstring_tokens":["\u52a0\u8f7d\u6a21\u578b",":","param","resume_path",":",":","param","model",":",":","param","optimizer",":",":","return",":","\u6ce8\u610f\uff1a","\u5982\u679c\u662f\u52a0\u8f7dBert\u6a21\u578b\u7684\u8bdd\uff0c\u9700\u8981\u8c03\u6574\uff0c\u4e0d\u80fd\u4f7f\u7528\u8be5\u6a21\u5f0f","\u53ef\u4ee5\u4f7f\u7528\u6a21\u5757\u81ea\u5e26\u7684Bert_model",".","from_pretrained","(","state_dict","=","your","save","state_dict",")"],"function":"def restore_checkpoint(resume_path, model=None):\n '''\n \u52a0\u8f7d\u6a21\u578b\n :param resume_path:\n :param model:\n :param optimizer:\n :return:\n \u6ce8\u610f\uff1a \u5982\u679c\u662f\u52a0\u8f7dBert\u6a21\u578b\u7684\u8bdd\uff0c\u9700\u8981\u8c03\u6574\uff0c\u4e0d\u80fd\u4f7f\u7528\u8be5\u6a21\u5f0f\n \u53ef\u4ee5\u4f7f\u7528\u6a21\u5757\u81ea\u5e26\u7684Bert_model.from_pretrained(state_dict = your save state_dict)\n '''\n if isinstance(resume_path, Path):\n resume_path = str(resume_path)\n checkpoint = torch.load(resume_path)\n best = checkpoint['best']\n start_epoch = checkpoint['epoch'] + 1\n states = checkpoint['state_dict']\n if isinstance(model, nn.DataParallel):\n model.module.load_state_dict(states)\n else:\n model.load_state_dict(states)\n return [model,best,start_epoch]","function_tokens":["def","restore_checkpoint","(","resume_path",",","model","=","None",")",":","if","isinstance","(","resume_path",",","Path",")",":","resume_path","=","str","(","resume_path",")","checkpoint","=","torch",".","load","(","resume_path",")","best","=","checkpoint","[","'best'","]","start_epoch","=","checkpoint","[","'epoch'","]","+","1","states","=","checkpoint","[","'state_dict'","]","if","isinstance","(","model",",","nn",".","DataParallel",")",":","model",".","module",".","load_state_dict","(","states",")","else",":","model",".","load_state_dict","(","states",")","return","[","model",",","best",",","start_epoch","]"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L103-L123"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"save_pickle","parameters":"(data, file_path)","argument_list":"","return_statement":"","docstring":"\u4fdd\u5b58\u6210pickle\u6587\u4ef6\n :param data:\n :param file_name:\n :param pickle_path:\n 
:return:","docstring_summary":"\u4fdd\u5b58\u6210pickle\u6587\u4ef6\n :param data:\n :param file_name:\n :param pickle_path:\n :return:","docstring_tokens":["\u4fdd\u5b58\u6210pickle\u6587\u4ef6",":","param","data",":",":","param","file_name",":",":","param","pickle_path",":",":","return",":"],"function":"def save_pickle(data, file_path):\n '''\n \u4fdd\u5b58\u6210pickle\u6587\u4ef6\n :param data:\n :param file_name:\n :param pickle_path:\n :return:\n '''\n if isinstance(file_path, Path):\n file_path = str(file_path)\n with open(file_path, 'wb') as f:\n pickle.dump(data, f)","function_tokens":["def","save_pickle","(","data",",","file_path",")",":","if","isinstance","(","file_path",",","Path",")",":","file_path","=","str","(","file_path",")","with","open","(","file_path",",","'wb'",")","as","f",":","pickle",".","dump","(","data",",","f",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L126-L137"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"load_pickle","parameters":"(input_file)","argument_list":"","return_statement":"return data","docstring":"\u8bfb\u53d6pickle\u6587\u4ef6\n :param pickle_path:\n :param file_name:\n :return:","docstring_summary":"\u8bfb\u53d6pickle\u6587\u4ef6\n :param pickle_path:\n :param file_name:\n :return:","docstring_tokens":["\u8bfb\u53d6pickle\u6587\u4ef6",":","param","pickle_path",":",":","param","file_name",":",":","return",":"],"function":"def load_pickle(input_file):\n '''\n \u8bfb\u53d6pickle\u6587\u4ef6\n :param pickle_path:\n :param file_name:\n :return:\n '''\n with open(str(input_file), 'rb') as f:\n data = pickle.load(f)\n return data","function_tokens":["def","load_pickle","(","input_file",")",":","with","open","(","str","(","input_file",")",",","'rb'",")","as","f",":","data","=","pickle",".","load","(","f",")","return","data"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L140-L149"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"save_json","parameters":"(data, file_path)","argument_list":"","return_statement":"","docstring":"\u4fdd\u5b58\u6210json\u6587\u4ef6\n :param data:\n :param json_path:\n :param file_name:\n :return:","docstring_summary":"\u4fdd\u5b58\u6210json\u6587\u4ef6\n :param data:\n :param json_path:\n :param file_name:\n :return:","docstring_tokens":["\u4fdd\u5b58\u6210json\u6587\u4ef6",":","param","data",":",":","param","json_path",":",":","param","file_name",":",":","return",":"],"function":"def save_json(data, file_path):\n '''\n \u4fdd\u5b58\u6210json\u6587\u4ef6\n :param data:\n :param json_path:\n :param file_name:\n :return:\n '''\n if not isinstance(file_path, Path):\n file_path = Path(file_path)\n # if isinstance(data,dict):\n # data = json.dumps(data)\n with open(str(file_path), 'w') as f:\n json.dump(data, f)","function_tokens":["def","save_json","(","data",",","file_path",")",":","if","not","isinstance","(","file_path",",","Path",")",":","file_path","=","Path","(","file_path",")","# if isinstance(data,dict):","# data = 
json.dumps(data)","with","open","(","str","(","file_path",")",",","'w'",")","as","f",":","json",".","dump","(","data",",","f",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L152-L165"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"save_numpy","parameters":"(data, file_path)","argument_list":"","return_statement":"","docstring":"\u4fdd\u5b58\u6210.npy\u6587\u4ef6\n :param data:\n :param file_path:\n :return:","docstring_summary":"\u4fdd\u5b58\u6210.npy\u6587\u4ef6\n :param data:\n :param file_path:\n :return:","docstring_tokens":["\u4fdd\u5b58\u6210",".","npy\u6587\u4ef6",":","param","data",":",":","param","file_path",":",":","return",":"],"function":"def save_numpy(data, file_path):\n '''\n \u4fdd\u5b58\u6210.npy\u6587\u4ef6\n :param data:\n :param file_path:\n :return:\n '''\n if not isinstance(file_path, Path):\n file_path = Path(file_path)\n np.save(str(file_path),data)","function_tokens":["def","save_numpy","(","data",",","file_path",")",":","if","not","isinstance","(","file_path",",","Path",")",":","file_path","=","Path","(","file_path",")","np",".","save","(","str","(","file_path",")",",","data",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L167-L176"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"load_numpy","parameters":"(file_path)","argument_list":"","return_statement":"","docstring":"\u52a0\u8f7d.npy\u6587\u4ef6\n :param file_path:\n :return:","docstring_summary":"\u52a0\u8f7d.npy\u6587\u4ef6\n :param file_path:\n :return:","docstring_tokens":["\u52a0\u8f7d",".","npy\u6587\u4ef6",":","param","file_path",":",":","return",":"],"function":"def load_numpy(file_path):\n '''\n \u52a0\u8f7d.npy\u6587\u4ef6\n :param file_path:\n :return:\n '''\n if not isinstance(file_path, Path):\n file_path = Path(file_path)\n np.load(str(file_path))","function_tokens":["def","load_numpy","(","file_path",")",":","if","not","isinstance","(","file_path",",","Path",")",":","file_path","=","Path","(","file_path",")","np",".","load","(","str","(","file_path",")",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L178-L186"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"load_json","parameters":"(file_path)","argument_list":"","return_statement":"return data","docstring":"\u52a0\u8f7djson\u6587\u4ef6\n :param json_path:\n :param file_name:\n :return:","docstring_summary":"\u52a0\u8f7djson\u6587\u4ef6\n :param json_path:\n :param file_name:\n :return:","docstring_tokens":["\u52a0\u8f7djson\u6587\u4ef6",":","param","json_path",":",":","param","file_name",":",":","return",":"],"function":"def load_json(file_path):\n '''\n \u52a0\u8f7djson\u6587\u4ef6\n :param json_path:\n :param file_name:\n :return:\n '''\n if not isinstance(file_path, Path):\n file_path = Path(file_path)\n with open(str(file_path), 'r') as f:\n data = json.load(f)\n return 
data","function_tokens":["def","load_json","(","file_path",")",":","if","not","isinstance","(","file_path",",","Path",")",":","file_path","=","Path","(","file_path",")","with","open","(","str","(","file_path",")",",","'r'",")","as","f",":","data","=","json",".","load","(","f",")","return","data"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L188-L199"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"json_to_text","parameters":"(file_path,data)","argument_list":"","return_statement":"","docstring":"\u5c06json list\u5199\u5165text\u6587\u4ef6\u4e2d\n :param file_path:\n :param data:\n :return:","docstring_summary":"\u5c06json list\u5199\u5165text\u6587\u4ef6\u4e2d\n :param file_path:\n :param data:\n :return:","docstring_tokens":["\u5c06json","list\u5199\u5165text\u6587\u4ef6\u4e2d",":","param","file_path",":",":","param","data",":",":","return",":"],"function":"def json_to_text(file_path,data):\n '''\n \u5c06json list\u5199\u5165text\u6587\u4ef6\u4e2d\n :param file_path:\n :param data:\n :return:\n '''\n if not isinstance(file_path, Path):\n file_path = Path(file_path)\n with open(str(file_path), 'w') as fw:\n for line in data:\n line = json.dumps(line, ensure_ascii=False)\n fw.write(line + '\\n')","function_tokens":["def","json_to_text","(","file_path",",","data",")",":","if","not","isinstance","(","file_path",",","Path",")",":","file_path","=","Path","(","file_path",")","with","open","(","str","(","file_path",")",",","'w'",")","as","fw",":","for","line","in","data",":","line","=","json",".","dumps","(","line",",","ensure_ascii","=","False",")","fw",".","write","(","line","+","'\\n'",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L201-L213"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"save_model","parameters":"(model, model_path)","argument_list":"","return_statement":"","docstring":"\u5b58\u50a8\u4e0d\u542b\u6709\u663e\u5361\u4fe1\u606f\u7684state_dict\u6216model\n :param model:\n :param model_name:\n :param only_param:\n :return:","docstring_summary":"\u5b58\u50a8\u4e0d\u542b\u6709\u663e\u5361\u4fe1\u606f\u7684state_dict\u6216model\n :param model:\n :param model_name:\n :param only_param:\n :return:","docstring_tokens":["\u5b58\u50a8\u4e0d\u542b\u6709\u663e\u5361\u4fe1\u606f\u7684state_dict\u6216model",":","param","model",":",":","param","model_name",":",":","param","only_param",":",":","return",":"],"function":"def save_model(model, model_path):\n \"\"\" \u5b58\u50a8\u4e0d\u542b\u6709\u663e\u5361\u4fe1\u606f\u7684state_dict\u6216model\n :param model:\n :param model_name:\n :param only_param:\n :return:\n \"\"\"\n if isinstance(model_path, Path):\n model_path = str(model_path)\n if isinstance(model, nn.DataParallel):\n model = model.module\n state_dict = model.state_dict()\n for key in state_dict:\n state_dict[key] = state_dict[key].cpu()\n torch.save(state_dict, 
model_path)","function_tokens":["def","save_model","(","model",",","model_path",")",":","if","isinstance","(","model_path",",","Path",")",":","model_path","=","str","(","model_path",")","if","isinstance","(","model",",","nn",".","DataParallel",")",":","model","=","model",".","module","state_dict","=","model",".","state_dict","(",")","for","key","in","state_dict",":","state_dict","[","key","]","=","state_dict","[","key","]",".","cpu","(",")","torch",".","save","(","state_dict",",","model_path",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L215-L229"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"load_model","parameters":"(model, model_path)","argument_list":"","return_statement":"return model","docstring":"\u52a0\u8f7d\u6a21\u578b\n :param model:\n :param model_name:\n :param model_path:\n :param only_param:\n :return:","docstring_summary":"\u52a0\u8f7d\u6a21\u578b\n :param model:\n :param model_name:\n :param model_path:\n :param only_param:\n :return:","docstring_tokens":["\u52a0\u8f7d\u6a21\u578b",":","param","model",":",":","param","model_name",":",":","param","model_path",":",":","param","only_param",":",":","return",":"],"function":"def load_model(model, model_path):\n '''\n \u52a0\u8f7d\u6a21\u578b\n :param model:\n :param model_name:\n :param model_path:\n :param only_param:\n :return:\n '''\n if isinstance(model_path, Path):\n model_path = str(model_path)\n logging.info(f\"loading model from {str(model_path)} .\")\n states = torch.load(model_path)\n state = states['state_dict']\n if isinstance(model, nn.DataParallel):\n model.module.load_state_dict(state)\n else:\n model.load_state_dict(state)\n return model","function_tokens":["def","load_model","(","model",",","model_path",")",":","if","isinstance","(","model_path",",","Path",")",":","model_path","=","str","(","model_path",")","logging",".","info","(","f\"loading model from {str(model_path)} .\"",")","states","=","torch",".","load","(","model_path",")","state","=","states","[","'state_dict'","]","if","isinstance","(","model",",","nn",".","DataParallel",")",":","model",".","module",".","load_state_dict","(","state",")","else",":","model",".","load_state_dict","(","state",")","return","model"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L231-L249"} {"nwo":"CLUEbenchmark\/CLUE","sha":"5bd39732734afecb490cf18a5212e692dbf2c007","path":"baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py","language":"python","identifier":"summary","parameters":"(model, *inputs, batch_size=-1, show_input=True)","argument_list":"","return_statement":"","docstring":"\u6253\u5370\u6a21\u578b\u7ed3\u6784\u4fe1\u606f\n :param model:\n :param inputs:\n :param batch_size:\n :param show_input:\n :return:\n Example:\n >>> print(\"model summary info: \")\n >>> for step,batch in enumerate(train_data):\n >>> summary(self.model,*batch,show_input=True)\n >>> break","docstring_summary":"\u6253\u5370\u6a21\u578b\u7ed3\u6784\u4fe1\u606f\n :param model:\n :param inputs:\n :param batch_size:\n :param show_input:\n :return:\n Example:\n >>> print(\"model summary info: \")\n >>> for step,batch in enumerate(train_data):\n >>> summary(self.model,*batch,show_input=True)\n >>> 
break","docstring_tokens":["\u6253\u5370\u6a21\u578b\u7ed3\u6784\u4fe1\u606f",":","param","model",":",":","param","inputs",":",":","param","batch_size",":",":","param","show_input",":",":","return",":","Example",":",">>>","print","(","model","summary","info",":",")",">>>","for","step","batch","in","enumerate","(","train_data",")",":",">>>","summary","(","self",".","model","*","batch","show_input","=","True",")",">>>","break"],"function":"def summary(model, *inputs, batch_size=-1, show_input=True):\n '''\n \u6253\u5370\u6a21\u578b\u7ed3\u6784\u4fe1\u606f\n :param model:\n :param inputs:\n :param batch_size:\n :param show_input:\n :return:\n Example:\n >>> print(\"model summary info: \")\n >>> for step,batch in enumerate(train_data):\n >>> summary(self.model,*batch,show_input=True)\n >>> break\n '''\n\n def register_hook(module):\n def hook(module, input, output=None):\n class_name = str(module.__class__).split(\".\")[-1].split(\"'\")[0]\n module_idx = len(summary)\n\n m_key = f\"{class_name}-{module_idx + 1}\"\n summary[m_key] = OrderedDict()\n summary[m_key][\"input_shape\"] = list(input[0].size())\n summary[m_key][\"input_shape\"][0] = batch_size\n\n if show_input is False and output is not None:\n if isinstance(output, (list, tuple)):\n for out in output:\n if isinstance(out, torch.Tensor):\n summary[m_key][\"output_shape\"] = [\n [-1] + list(out.size())[1:]\n ][0]\n else:\n summary[m_key][\"output_shape\"] = [\n [-1] + list(out[0].size())[1:]\n ][0]\n else:\n summary[m_key][\"output_shape\"] = list(output.size())\n summary[m_key][\"output_shape\"][0] = batch_size\n\n params = 0\n if hasattr(module, \"weight\") and hasattr(module.weight, \"size\"):\n params += torch.prod(torch.LongTensor(list(module.weight.size())))\n summary[m_key][\"trainable\"] = module.weight.requires_grad\n if hasattr(module, \"bias\") and hasattr(module.bias, \"size\"):\n params += torch.prod(torch.LongTensor(list(module.bias.size())))\n summary[m_key][\"nb_params\"] = params\n\n if (not isinstance(module, nn.Sequential) and not isinstance(module, nn.ModuleList) and not (module == model)):\n if show_input is True:\n hooks.append(module.register_forward_pre_hook(hook))\n else:\n hooks.append(module.register_forward_hook(hook))\n\n # create properties\n summary = OrderedDict()\n hooks = []\n\n # register hook\n model.apply(register_hook)\n model(*inputs)\n\n # remove these hooks\n for h in hooks:\n h.remove()\n\n print(\"-----------------------------------------------------------------------\")\n if show_input is True:\n line_new = f\"{'Layer (type)':>25} {'Input Shape':>25} {'Param #':>15}\"\n else:\n line_new = f\"{'Layer (type)':>25} {'Output Shape':>25} {'Param #':>15}\"\n print(line_new)\n print(\"=======================================================================\")\n\n total_params = 0\n total_output = 0\n trainable_params = 0\n for layer in summary:\n # input_shape, output_shape, trainable, nb_params\n if show_input is True:\n line_new = \"{:>25} {:>25} {:>15}\".format(\n layer,\n str(summary[layer][\"input_shape\"]),\n \"{0:,}\".format(summary[layer][\"nb_params\"]),\n )\n else:\n line_new = \"{:>25} {:>25} {:>15}\".format(\n layer,\n str(summary[layer][\"output_shape\"]),\n \"{0:,}\".format(summary[layer][\"nb_params\"]),\n )\n\n total_params += summary[layer][\"nb_params\"]\n if show_input is True:\n total_output += np.prod(summary[layer][\"input_shape\"])\n else:\n total_output += np.prod(summary[layer][\"output_shape\"])\n if \"trainable\" in summary[layer]:\n if summary[layer][\"trainable\"] == 
True:\n trainable_params += summary[layer][\"nb_params\"]\n\n print(line_new)\n\n print(\"=======================================================================\")\n print(f\"Total params: {total_params:0,}\")\n print(f\"Trainable params: {trainable_params:0,}\")\n print(f\"Non-trainable params: {(total_params - trainable_params):0,}\")\n print(\"-----------------------------------------------------------------------\")","function_tokens":["def","summary","(","model",",","*","inputs",",","batch_size","=","-","1",",","show_input","=","True",")",":","def","register_hook","(","module",")",":","def","hook","(","module",",","input",",","output","=","None",")",":","class_name","=","str","(","module",".","__class__",")",".","split","(","\".\"",")","[","-","1","]",".","split","(","\"'\"",")","[","0","]","module_idx","=","len","(","summary",")","m_key","=","f\"{class_name}-{module_idx + 1}\"","summary","[","m_key","]","=","OrderedDict","(",")","summary","[","m_key","]","[","\"input_shape\"","]","=","list","(","input","[","0","]",".","size","(",")",")","summary","[","m_key","]","[","\"input_shape\"","]","[","0","]","=","batch_size","if","show_input","is","False","and","output","is","not","None",":","if","isinstance","(","output",",","(","list",",","tuple",")",")",":","for","out","in","output",":","if","isinstance","(","out",",","torch",".","Tensor",")",":","summary","[","m_key","]","[","\"output_shape\"","]","=","[","[","-","1","]","+","list","(","out",".","size","(",")",")","[","1",":","]","]","[","0","]","else",":","summary","[","m_key","]","[","\"output_shape\"","]","=","[","[","-","1","]","+","list","(","out","[","0","]",".","size","(",")",")","[","1",":","]","]","[","0","]","else",":","summary","[","m_key","]","[","\"output_shape\"","]","=","list","(","output",".","size","(",")",")","summary","[","m_key","]","[","\"output_shape\"","]","[","0","]","=","batch_size","params","=","0","if","hasattr","(","module",",","\"weight\"",")","and","hasattr","(","module",".","weight",",","\"size\"",")",":","params","+=","torch",".","prod","(","torch",".","LongTensor","(","list","(","module",".","weight",".","size","(",")",")",")",")","summary","[","m_key","]","[","\"trainable\"","]","=","module",".","weight",".","requires_grad","if","hasattr","(","module",",","\"bias\"",")","and","hasattr","(","module",".","bias",",","\"size\"",")",":","params","+=","torch",".","prod","(","torch",".","LongTensor","(","list","(","module",".","bias",".","size","(",")",")",")",")","summary","[","m_key","]","[","\"nb_params\"","]","=","params","if","(","not","isinstance","(","module",",","nn",".","Sequential",")","and","not","isinstance","(","module",",","nn",".","ModuleList",")","and","not","(","module","==","model",")",")",":","if","show_input","is","True",":","hooks",".","append","(","module",".","register_forward_pre_hook","(","hook",")",")","else",":","hooks",".","append","(","module",".","register_forward_hook","(","hook",")",")","# create properties","summary","=","OrderedDict","(",")","hooks","=","[","]","# register hook","model",".","apply","(","register_hook",")","model","(","*","inputs",")","# remove these hooks","for","h","in","hooks",":","h",".","remove","(",")","print","(","\"-----------------------------------------------------------------------\"",")","if","show_input","is","True",":","line_new","=","f\"{'Layer (type)':>25} {'Input Shape':>25} {'Param #':>15}\"","else",":","line_new","=","f\"{'Layer (type)':>25} {'Output Shape':>25} {'Param 
#':>15}\"","print","(","line_new",")","print","(","\"=======================================================================\"",")","total_params","=","0","total_output","=","0","trainable_params","=","0","for","layer","in","summary",":","# input_shape, output_shape, trainable, nb_params","if","show_input","is","True",":","line_new","=","\"{:>25} {:>25} {:>15}\"",".","format","(","layer",",","str","(","summary","[","layer","]","[","\"input_shape\"","]",")",",","\"{0:,}\"",".","format","(","summary","[","layer","]","[","\"nb_params\"","]",")",",",")","else",":","line_new","=","\"{:>25} {:>25} {:>15}\"",".","format","(","layer",",","str","(","summary","[","layer","]","[","\"output_shape\"","]",")",",","\"{0:,}\"",".","format","(","summary","[","layer","]","[","\"nb_params\"","]",")",",",")","total_params","+=","summary","[","layer","]","[","\"nb_params\"","]","if","show_input","is","True",":","total_output","+=","np",".","prod","(","summary","[","layer","]","[","\"input_shape\"","]",")","else",":","total_output","+=","np",".","prod","(","summary","[","layer","]","[","\"output_shape\"","]",")","if","\"trainable\"","in","summary","[","layer","]",":","if","summary","[","layer","]","[","\"trainable\"","]","==","True",":","trainable_params","+=","summary","[","layer","]","[","\"nb_params\"","]","print","(","line_new",")","print","(","\"=======================================================================\"",")","print","(","f\"Total params: {total_params:0,}\"",")","print","(","f\"Trainable params: {trainable_params:0,}\"",")","print","(","f\"Non-trainable params: {(total_params - trainable_params):0,}\"",")","print","(","\"-----------------------------------------------------------------------\"",")"],"url":"https:\/\/github.com\/CLUEbenchmark\/CLUE\/blob\/5bd39732734afecb490cf18a5212e692dbf2c007\/baselines\/models_pytorch\/classifier_pytorch\/tools\/common.py#L280-L387"}