{"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_int64_feature","parameters":"(value)","argument_list":"","return_statement":"return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))","docstring":"Wrapper for inserting an int64 Feature into a SequenceExample proto.","docstring_summary":"Wrapper for inserting an int64 Feature into a SequenceExample proto.","docstring_tokens":["Wrapper","for","inserting","an","int64","Feature","into","a","SequenceExample","proto","."],"function":"def _int64_feature(value):\n \"\"\"Wrapper for inserting an int64 Feature into a SequenceExample proto.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))","function_tokens":["def","_int64_feature","(","value",")",":","return","tf",".","train",".","Feature","(","int64_list","=","tf",".","train",".","Int64List","(","value","=","[","value","]",")",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L115-L117"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_bytes_feature","parameters":"(value)","argument_list":"","return_statement":"return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))","docstring":"Wrapper for inserting a bytes Feature into a SequenceExample proto.","docstring_summary":"Wrapper for inserting a bytes Feature into a SequenceExample proto.","docstring_tokens":["Wrapper","for","inserting","a","bytes","Feature","into","a","SequenceExample","proto","."],"function":"def _bytes_feature(value):\n \"\"\"Wrapper for inserting a bytes Feature into a SequenceExample proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))","function_tokens":["def","_bytes_feature","(","value",")",":","return","tf",".","train",".","Feature","(","bytes_list","=","tf",".","train",".","BytesList","(","value","=","[","str","(","value",")","]",")",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L120-L122"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_int64_feature_list","parameters":"(values)","argument_list":"","return_statement":"return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])","docstring":"Wrapper for inserting an int64 FeatureList into a SequenceExample proto.","docstring_summary":"Wrapper for inserting an int64 FeatureList into a SequenceExample proto.","docstring_tokens":["Wrapper","for","inserting","an","int64","FeatureList","into","a","SequenceExample","proto","."],"function":"def _int64_feature_list(values):\n \"\"\"Wrapper for inserting an int64 FeatureList into a SequenceExample proto.\"\"\"\n return tf.train.FeatureList(feature=[_int64_feature(v) for v in 
values])","function_tokens":["def","_int64_feature_list","(","values",")",":","return","tf",".","train",".","FeatureList","(","feature","=","[","_int64_feature","(","v",")","for","v","in","values","]",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L125-L127"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_bytes_feature_list","parameters":"(values)","argument_list":"","return_statement":"return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])","docstring":"Wrapper for inserting a bytes FeatureList into a SequenceExample proto.","docstring_summary":"Wrapper for inserting a bytes FeatureList into a SequenceExample proto.","docstring_tokens":["Wrapper","for","inserting","a","bytes","FeatureList","into","a","SequenceExample","proto","."],"function":"def _bytes_feature_list(values):\n \"\"\"Wrapper for inserting a bytes FeatureList into a SequenceExample proto.\"\"\"\n return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])","function_tokens":["def","_bytes_feature_list","(","values",")",":","return","tf",".","train",".","FeatureList","(","feature","=","[","_bytes_feature","(","v",")","for","v","in","values","]",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L130-L132"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_to_sequence_example","parameters":"(image, decoder, vocab)","argument_list":"","return_statement":"return sequence_example","docstring":"Builds a SequenceExample proto for an image-caption pair.\n Args:\n image: An ImageMetadata object.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n Returns:\n A SequenceExample proto.","docstring_summary":"Builds a SequenceExample proto for an image-caption pair.\n Args:\n image: An ImageMetadata object.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n Returns:\n A SequenceExample proto.","docstring_tokens":["Builds","a","SequenceExample","proto","for","an","image","-","caption","pair",".","Args",":","image",":","An","ImageMetadata","object",".","decoder",":","An","ImageDecoder","object",".","vocab",":","A","Vocabulary","object",".","Returns",":","A","SequenceExample","proto","."],"function":"def _to_sequence_example(image, decoder, vocab):\n \"\"\"Builds a SequenceExample proto for an image-caption pair.\n Args:\n image: An ImageMetadata object.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n Returns:\n A SequenceExample proto.\n \"\"\"\n with tf.gfile.FastGFile(image.filename, \"r\") as f:\n encoded_image = f.read()\n\n try:\n decoder.decode_jpeg(encoded_image)\n except (tf.errors.InvalidArgumentError, AssertionError):\n print(\"Skipping file with invalid JPEG data: %s\" % image.filename)\n return\n context = tf.train.Features(feature={\n \"image\/id\": _int64_feature(image.id),\n \"image\/data\": _bytes_feature(encoded_image),\n })\n\n assert len(image.captions) == 1\n caption = image.captions[0]\n caption_ids = [vocab.word_to_id(word) for word in caption]\n feature_lists = tf.train.FeatureLists(feature_list={\n \"image\/caption\": _bytes_feature_list(caption),\n 
\"image\/caption_ids\": _int64_feature_list(caption_ids)\n })\n sequence_example = tf.train.SequenceExample(\n context=context, feature_lists=feature_lists)\n\n return sequence_example","function_tokens":["def","_to_sequence_example","(","image",",","decoder",",","vocab",")",":","with","tf",".","gfile",".","FastGFile","(","image",".","filename",",","\"r\"",")","as","f",":","encoded_image","=","f",".","read","(",")","try",":","decoder",".","decode_jpeg","(","encoded_image",")","except","(","tf",".","errors",".","InvalidArgumentError",",","AssertionError",")",":","print","(","\"Skipping file with invalid JPEG data: %s\"","%","image",".","filename",")","return","context","=","tf",".","train",".","Features","(","feature","=","{","\"image\/id\"",":","_int64_feature","(","image",".","id",")",",","\"image\/data\"",":","_bytes_feature","(","encoded_image",")",",","}",")","assert","len","(","image",".","captions",")","==","1","caption","=","image",".","captions","[","0","]","caption_ids","=","[","vocab",".","word_to_id","(","word",")","for","word","in","caption","]","feature_lists","=","tf",".","train",".","FeatureLists","(","feature_list","=","{","\"image\/caption\"",":","_bytes_feature_list","(","caption",")",",","\"image\/caption_ids\"",":","_int64_feature_list","(","caption_ids",")","}",")","sequence_example","=","tf",".","train",".","SequenceExample","(","context","=","context",",","feature_lists","=","feature_lists",")","return","sequence_example"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L135-L167"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_process_image_files","parameters":"(thread_index, ranges, name, images, decoder, vocab,\n num_shards)","argument_list":"","return_statement":"","docstring":"Processes and saves a subset of images as TFRecord files in one thread.\n Args:\n thread_index: Integer thread identifier within [0, len(ranges)].\n ranges: A list of pairs of integers specifying the ranges of the dataset to\n process in parallel.\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output files.","docstring_summary":"Processes and saves a subset of images as TFRecord files in one thread.\n Args:\n thread_index: Integer thread identifier within [0, len(ranges)].\n ranges: A list of pairs of integers specifying the ranges of the dataset to\n process in parallel.\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output 
files.","docstring_tokens":["Processes","and","saves","a","subset","of","images","as","TFRecord","files","in","one","thread",".","Args",":","thread_index",":","Integer","thread","identifier","within","[","0","len","(","ranges",")","]",".","ranges",":","A","list","of","pairs","of","integers","specifying","the","ranges","of","the","dataset","to","process","in","parallel",".","name",":","Unique","identifier","specifying","the","dataset",".","images",":","List","of","ImageMetadata",".","decoder",":","An","ImageDecoder","object",".","vocab",":","A","Vocabulary","object",".","num_shards",":","Integer","number","of","shards","for","the","output","files","."],"function":"def _process_image_files(thread_index, ranges, name, images, decoder, vocab,\n num_shards):\n \"\"\"Processes and saves a subset of images as TFRecord files in one thread.\n Args:\n thread_index: Integer thread identifier within [0, len(ranges)].\n ranges: A list of pairs of integers specifying the ranges of the dataset to\n process in parallel.\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output files.\n \"\"\"\n # Each thread produces N shards where N = num_shards \/ num_threads. For\n # instance, if num_shards = 128, and num_threads = 2, then the first thread\n # would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards \/ num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = \"%s-%.5d-of-%.5d\" % (name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_dir, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in images_in_shard:\n image = images[i]\n\n sequence_example = _to_sequence_example(image, decoder, vocab)\n if sequence_example is not None:\n writer.write(sequence_example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print(\"%s [thread %d]: Processed %d of %d items in thread batch.\" %\n (datetime.now(), thread_index, counter, num_images_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print(\"%s [thread %d]: Wrote %d image-caption pairs to %s\" %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print(\"%s [thread %d]: Wrote %d image-caption pairs to %d shards.\" %\n (datetime.now(), thread_index, counter, num_shards_per_batch))\n sys.stdout.flush()","function_tokens":["def","_process_image_files","(","thread_index",",","ranges",",","name",",","images",",","decoder",",","vocab",",","num_shards",")",":","# Each thread produces N shards where N = num_shards \/ num_threads. 
For","# instance, if num_shards = 128, and num_threads = 2, then the first thread","# would produce shards [0, 64).","num_threads","=","len","(","ranges",")","assert","not","num_shards","%","num_threads","num_shards_per_batch","=","int","(","num_shards","\/","num_threads",")","shard_ranges","=","np",".","linspace","(","ranges","[","thread_index","]","[","0","]",",","ranges","[","thread_index","]","[","1","]",",","num_shards_per_batch","+","1",")",".","astype","(","int",")","num_images_in_thread","=","ranges","[","thread_index","]","[","1","]","-","ranges","[","thread_index","]","[","0","]","counter","=","0","for","s","in","range","(","num_shards_per_batch",")",":","# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'","shard","=","thread_index","*","num_shards_per_batch","+","s","output_filename","=","\"%s-%.5d-of-%.5d\"","%","(","name",",","shard",",","num_shards",")","output_file","=","os",".","path",".","join","(","FLAGS",".","output_dir",",","output_filename",")","writer","=","tf",".","python_io",".","TFRecordWriter","(","output_file",")","shard_counter","=","0","images_in_shard","=","np",".","arange","(","shard_ranges","[","s","]",",","shard_ranges","[","s","+","1","]",",","dtype","=","int",")","for","i","in","images_in_shard",":","image","=","images","[","i","]","sequence_example","=","_to_sequence_example","(","image",",","decoder",",","vocab",")","if","sequence_example","is","not","None",":","writer",".","write","(","sequence_example",".","SerializeToString","(",")",")","shard_counter","+=","1","counter","+=","1","if","not","counter","%","1000",":","print","(","\"%s [thread %d]: Processed %d of %d items in thread batch.\"","%","(","datetime",".","now","(",")",",","thread_index",",","counter",",","num_images_in_thread",")",")","sys",".","stdout",".","flush","(",")","writer",".","close","(",")","print","(","\"%s [thread %d]: Wrote %d image-caption pairs to %s\"","%","(","datetime",".","now","(",")",",","thread_index",",","shard_counter",",","output_file",")",")","sys",".","stdout",".","flush","(",")","shard_counter","=","0","print","(","\"%s [thread %d]: Wrote %d image-caption pairs to %d shards.\"","%","(","datetime",".","now","(",")",",","thread_index",",","counter",",","num_shards_per_batch",")",")","sys",".","stdout",".","flush","(",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L170-L225"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_process_dataset","parameters":"(name, images, vocab, num_shards)","argument_list":"","return_statement":"","docstring":"Processes a complete data set and saves it as a TFRecord.\n Args:\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output files.","docstring_summary":"Processes a complete data set and saves it as a TFRecord.\n Args:\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output 
files.","docstring_tokens":["Processes","a","complete","data","set","and","saves","it","as","a","TFRecord",".","Args",":","name",":","Unique","identifier","specifying","the","dataset",".","images",":","List","of","ImageMetadata",".","vocab",":","A","Vocabulary","object",".","num_shards",":","Integer","number","of","shards","for","the","output","files","."],"function":"def _process_dataset(name, images, vocab, num_shards):\n \"\"\"Processes a complete data set and saves it as a TFRecord.\n Args:\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output files.\n \"\"\"\n # Break up each image into a separate entity for each caption.\n images = [ImageMetadata(image.id, image.filename, [caption])\n for image in images for caption in image.captions]\n\n # Shuffle the ordering of images. Make the randomization repeatable.\n random.seed(12345)\n random.shuffle(images)\n\n # Break the images into num_threads batches. Batch i is defined as\n # images[ranges[i][0]:ranges[i][1]].\n num_threads = min(num_shards, FLAGS.num_threads)\n spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)\n ranges = []\n threads = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a utility for decoding JPEG images to run sanity checks.\n decoder = ImageDecoder()\n\n # Launch a thread for each batch.\n print(\"Launching %d threads for spacings: %s\" % (num_threads, ranges))\n for thread_index in range(len(ranges)):\n args = (thread_index, ranges, name, images, decoder, vocab, num_shards)\n t = threading.Thread(target=_process_image_files, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print(\"%s: Finished processing all %d image-caption pairs in data set '%s'.\" %\n (datetime.now(), len(images), name))","function_tokens":["def","_process_dataset","(","name",",","images",",","vocab",",","num_shards",")",":","# Break up each image into a separate entity for each caption.","images","=","[","ImageMetadata","(","image",".","id",",","image",".","filename",",","[","caption","]",")","for","image","in","images","for","caption","in","image",".","captions","]","# Shuffle the ordering of images. Make the randomization repeatable.","random",".","seed","(","12345",")","random",".","shuffle","(","images",")","# Break the images into num_threads batches. 
Batch i is defined as","# images[ranges[i][0]:ranges[i][1]].","num_threads","=","min","(","num_shards",",","FLAGS",".","num_threads",")","spacing","=","np",".","linspace","(","0",",","len","(","images",")",",","num_threads","+","1",")",".","astype","(","np",".","int",")","ranges","=","[","]","threads","=","[","]","for","i","in","range","(","len","(","spacing",")","-","1",")",":","ranges",".","append","(","[","spacing","[","i","]",",","spacing","[","i","+","1","]","]",")","# Create a mechanism for monitoring when all threads are finished.","coord","=","tf",".","train",".","Coordinator","(",")","# Create a utility for decoding JPEG images to run sanity checks.","decoder","=","ImageDecoder","(",")","# Launch a thread for each batch.","print","(","\"Launching %d threads for spacings: %s\"","%","(","num_threads",",","ranges",")",")","for","thread_index","in","range","(","len","(","ranges",")",")",":","args","=","(","thread_index",",","ranges",",","name",",","images",",","decoder",",","vocab",",","num_shards",")","t","=","threading",".","Thread","(","target","=","_process_image_files",",","args","=","args",")","t",".","start","(",")","threads",".","append","(","t",")","# Wait for all the threads to terminate.","coord",".","join","(","threads",")","print","(","\"%s: Finished processing all %d image-caption pairs in data set '%s'.\"","%","(","datetime",".","now","(",")",",","len","(","images",")",",","name",")",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L228-L270"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_create_vocab","parameters":"(captions)","argument_list":"","return_statement":"return vocab","docstring":"Creates the vocabulary of word to word_id.\n The vocabulary is saved to disk in a text file of word counts. The id of each\n word in the file is its corresponding 0-based line number.\n Args:\n captions: A list of lists of strings.\n Returns:\n A Vocabulary object.","docstring_summary":"Creates the vocabulary of word to word_id.\n The vocabulary is saved to disk in a text file of word counts. The id of each\n word in the file is its corresponding 0-based line number.\n Args:\n captions: A list of lists of strings.\n Returns:\n A Vocabulary object.","docstring_tokens":["Creates","the","vocabulary","of","word","to","word_id",".","The","vocabulary","is","saved","to","disk","in","a","text","file","of","word","counts",".","The","id","of","each","word","in","the","file","is","its","corresponding","0","-","based","line","number",".","Args",":","captions",":","A","list","of","lists","of","strings",".","Returns",":","A","Vocabulary","object","."],"function":"def _create_vocab(captions):\n \"\"\"Creates the vocabulary of word to word_id.\n The vocabulary is saved to disk in a text file of word counts. 
The id of each\n word in the file is its corresponding 0-based line number.\n Args:\n captions: A list of lists of strings.\n Returns:\n A Vocabulary object.\n \"\"\"\n print(\"Creating vocabulary.\")\n counter = Counter()\n for c in captions:\n counter.update(c)\n print(\"Total words:\", len(counter))\n\n # Filter uncommon words and sort by descending count.\n word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]\n word_counts.sort(key=lambda x: x[1], reverse=True)\n print(\"Words in vocabulary:\", len(word_counts))\n\n # Write out the word counts file.\n with tf.gfile.FastGFile(FLAGS.word_counts_output_file, \"w\") as f:\n f.write(\"\\n\".join([\"%s %d\" % (w, c) for w, c in word_counts]))\n print(\"Wrote vocabulary file:\", FLAGS.word_counts_output_file)\n\n # Create the vocabulary dictionary.\n reverse_vocab = [x[0] for x in word_counts]\n unk_id = len(reverse_vocab)\n vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n vocab = Vocabulary(vocab_dict, unk_id)\n\n return vocab","function_tokens":["def","_create_vocab","(","captions",")",":","print","(","\"Creating vocabulary.\"",")","counter","=","Counter","(",")","for","c","in","captions",":","counter",".","update","(","c",")","print","(","\"Total words:\"",",","len","(","counter",")",")","# Filter uncommon words and sort by descending count.","word_counts","=","[","x","for","x","in","counter",".","items","(",")","if","x","[","1","]",">=","FLAGS",".","min_word_count","]","word_counts",".","sort","(","key","=","lambda","x",":","x","[","1","]",",","reverse","=","True",")","print","(","\"Words in vocabulary:\"",",","len","(","word_counts",")",")","# Write out the word counts file.","with","tf",".","gfile",".","FastGFile","(","FLAGS",".","word_counts_output_file",",","\"w\"",")","as","f",":","f",".","write","(","\"\\n\"",".","join","(","[","\"%s %d\"","%","(","w",",","c",")","for","w",",","c","in","word_counts","]",")",")","print","(","\"Wrote vocabulary file:\"",",","FLAGS",".","word_counts_output_file",")","# Create the vocabulary dictionary.","reverse_vocab","=","[","x","[","0","]","for","x","in","word_counts","]","unk_id","=","len","(","reverse_vocab",")","vocab_dict","=","dict","(","[","(","x",",","y",")","for","(","y",",","x",")","in","enumerate","(","reverse_vocab",")","]",")","vocab","=","Vocabulary","(","vocab_dict",",","unk_id",")","return","vocab"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L273-L304"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_process_caption_jieba","parameters":"(caption)","argument_list":"","return_statement":"return tokenized_caption","docstring":"Processes a Chinese caption string into a list of tokenized words.\n Args:\n caption: A string caption.\n Returns:\n A list of strings; the tokenized caption.","docstring_summary":"Processes a Chinese caption string into a list of tokenized words.\n Args:\n caption: A string caption.\n Returns:\n A list of strings; the tokenized caption.","docstring_tokens":["Processes","a","Chinese","caption","string","into","a","list","of","tokenized","words",".","Args",":","caption",":","A","string","caption",".","Returns",":","A","list","of","strings",";","the","tokenized","caption","."],"function":"def _process_caption_jieba(caption):\n \"\"\"Processes a Chinese caption string into a list of tokenized words.\n Args:\n caption: A string caption.\n Returns:\n A list of strings; the tokenized caption.\n \"\"\"\n tokenized_caption = [FLAGS.start_word]\n tokenized_caption.extend(jieba.cut(caption, cut_all=False))\n tokenized_caption.append(FLAGS.end_word)\n return tokenized_caption","function_tokens":["def","_process_caption_jieba","(","caption",")",":","tokenized_caption","=","[","FLAGS",".","start_word","]","tokenized_caption",".","extend","(","jieba",".","cut","(","caption",",","cut_all","=","False",")",")","tokenized_caption",".","append","(","FLAGS",".","end_word",")","return","tokenized_caption"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L307-L317"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"_load_and_process_metadata","parameters":"(captions_file, image_dir)","argument_list":"","return_statement":"return image_metadata","docstring":"Loads image metadata from a JSON file and processes the captions.\n Args:\n captions_file: Json file containing caption annotations.\n image_dir: Directory containing the image files.\n Returns:\n A list of ImageMetadata.","docstring_summary":"Loads image metadata from a JSON file and processes the captions.\n Args:\n captions_file: Json file containing caption annotations.\n image_dir: Directory containing the image files.\n Returns:\n A list of ImageMetadata.","docstring_tokens":["Loads","image","metadata","from","a","JSON","file","and","processes","the","captions",".","Args",":","captions_file",":","Json","file","containing","caption","annotations",".","image_dir",":","Directory","containing","the","image","files",".","Returns",":","A","list","of","ImageMetadata","."],"function":"def _load_and_process_metadata(captions_file, image_dir):\n \"\"\"Loads image metadata from a JSON file and processes the captions.\n Args:\n captions_file: Json file containing caption annotations.\n image_dir: Directory containing the image files.\n Returns:\n A list of ImageMetadata.\n \"\"\"\n image_id = set([])\n id_to_captions = {}\n with open(captions_file, 'r') as f:\n caption_data = json.load(f)\n for data in caption_data:\n image_name = data['image_id'].split('.')[0]\n descriptions = data['caption']\n if image_name not in image_id:\n id_to_captions.setdefault(image_name, [])\n image_id.add(image_name)\n caption_num = len(descriptions)\n for i in range(caption_num):\n caption_temp = descriptions[i].strip().strip(\"\u3002\").replace('\\n', '')\n if caption_temp != '':\n id_to_captions[image_name].append(caption_temp)\n\n\n print(\"Loaded caption metadata for %d images from %s and image_id num is %s\" %\n (len(id_to_captions), captions_file, len(image_id)))\n # Process the captions and combine the data into a list of ImageMetadata.\n print(\"Processing captions.\")\n image_metadata = []\n num_captions = 0\n id = 0\n for base_filename in image_id:\n filename = os.path.join(image_dir, base_filename + '.jpg')\n # captions = [_process_caption(c) for c in id_to_captions[base_filename]]\n captions = [_process_caption_jieba(c) for c in id_to_captions[base_filename]]\n image_metadata.append(ImageMetadata(id, filename, captions))\n id = id + 1\n num_captions += len(captions)\n print(\"Finished processing %d captions for %d images in %s\" %\n (num_captions, len(id_to_captions), captions_file))\n return image_metadata","function_tokens":["def","_load_and_process_metadata","(","captions_file",",","image_dir",")",":","image_id","=","set","(","[","]",")","id_to_captions","=","{","}","with","open","(","captions_file",",","'r'",")","as","f",":","caption_data","=","json",".","load","(","f",")","for","data","in","caption_data",":","image_name","=","data","[","'image_id'","]",".","split","(","'.'",")","[","0","]","descriptions","=","data","[","'caption'","]","if","image_name","not","in","image_id",":","id_to_captions",".","setdefault","(","image_name",",","[","]",")","image_id",".","add","(","image_name",")","caption_num","=","len","(","descriptions",")","for","i","in","range","(","caption_num",")",":","caption_temp","=","descriptions","[","i","]",".","strip","(",")",".","strip","(","\"\u3002\"",")",".","replace","(","'\\n'",",","''",")","if","caption_temp","!=","''",":","id_to_captions","[","image_name","]",".","append","(","caption_temp",")","print","(","\"Loaded caption metadata for %d images from %s and image_id num is %s\"","%","(","len","(","id_to_captions",")",",","captions_file",",","len","(","image_id",")",")",")","# Process the captions and combine the data into a list of ImageMetadata.","print","(","\"Processing captions.\"",")","image_metadata","=","[","]","num_captions","=","0","id","=","0","for","base_filename","in","image_id",":","filename","=","os",".","path",".","join","(","image_dir",",","base_filename","+","'.jpg'",")","# captions = [_process_caption(c) for c in id_to_captions[base_filename]]","captions","=","[","_process_caption_jieba","(","c",")","for","c","in","id_to_captions","[","base_filename","]","]","image_metadata",".","append","(","ImageMetadata","(","id",",","filename",",","captions",")",")","id","=","id","+","1","num_captions","+=","len","(","captions",")","print","(","\"Finished processing %d captions for %d images in %s\"","%","(","num_captions",",","len","(","id_to_captions",")",",","captions_file",")",")","return","image_metadata"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L320-L361"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"Vocabulary.__init__","parameters":"(self, vocab, unk_id)","argument_list":"","return_statement":"","docstring":"Initializes the vocabulary.\n Args:\n vocab: A dictionary of word to word_id.\n unk_id: Id of the special 'unknown' word.","docstring_summary":"Initializes the vocabulary.\n Args:\n vocab: A dictionary of word to word_id.\n unk_id: Id of the special 'unknown' word.","docstring_tokens":["Initializes","the","vocabulary",".","Args",":","vocab",":","A","dictionary","of","word","to","word_id",".","unk_id",":","Id","of","the","special","unknown","word","."],"function":"def __init__(self, vocab, unk_id):\n \"\"\"Initializes the vocabulary.\n Args:\n vocab: A dictionary of word to word_id.\n unk_id: Id of the special 'unknown' word.\n \"\"\"\n self._vocab = vocab\n self._unk_id = unk_id","function_tokens":["def","__init__","(","self",",","vocab",",","unk_id",")",":","self",".","_vocab","=","vocab","self",".","_unk_id","=","unk_id"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L79-L86"}
{"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/build_tfrecord.py","language":"python","identifier":"Vocabulary.word_to_id","parameters":"(self, word)","argument_list":"","return_statement":"","docstring":"Returns the integer id of a word string.","docstring_summary":"Returns the integer id of a word string.","docstring_tokens":["Returns","the","integer","id","of","a","word","string","."],"function":"def word_to_id(self, word):\n \"\"\"Returns the integer id of a word string.\"\"\"\n if word in self._vocab:\n return self._vocab[word]\n else:\n return self._unk_id","function_tokens":["def","word_to_id","(","self",",","word",")",":","if","word","in","self",".","_vocab",":","return","self",".","_vocab","[","word","]","else",":","return","self",".","_unk_id"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/build_tfrecord.py#L88-L93"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/configuration.py","language":"python","identifier":"ModelConfig.__init__","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Sets the default model hyperparameters.","docstring_summary":"Sets the default model hyperparameters.","docstring_tokens":["Sets","the","default","model","hyperparameters","."],"function":"def __init__(self):\n \"\"\"Sets the default model hyperparameters.\"\"\"\n # File pattern of sharded TFRecord file containing SequenceExample protos.\n # Must be provided in training and evaluation modes.\n self.input_file_pattern = None\n\n # Image format (\"jpeg\" or \"png\").\n self.image_format = \"jpeg\"\n\n # Approximate number of values per input shard. Used to ensure sufficient\n # mixing between shards in training.\n self.values_per_input_shard = 2300\n # Minimum number of shards to keep in the input queue.\n self.input_queue_capacity_factor = 2\n # Number of threads for prefetching SequenceExample protos.\n self.num_input_reader_threads = 1\n\n # Name of the SequenceExample context feature containing image data.\n self.image_feature_name = \"image\/data\"\n # Name of the SequenceExample feature list containing integer captions.\n self.caption_feature_name = \"image\/caption_ids\"\n\n # Number of unique words in the vocab (plus 1, for ).\n # The default value is larger than the expected actual vocab size to allow\n # for differences between tokenizer versions used in preprocessing. There is\n # no harm in using a value greater than the actual vocab size, but using a\n # value less than the actual vocab size will result in an error.\n self.vocab_size = 20000\n\n # Number of threads for image preprocessing. Should be a multiple of 2.\n self.num_preprocess_threads = 4\n\n # Batch size.\n self.batch_size = 32\n\n # File containing an Inception v3 checkpoint to initialize the variables\n # of the Inception model. 
Must be provided when starting training for the\n # first time.\n self.inception_checkpoint_file = None\n\n # Dimensions of Inception v3 input images.\n self.image_height = 299\n self.image_width = 299\n\n # Scale used to initialize model variables.\n self.initializer_scale = 0.08\n\n # LSTM input and output dimensionality, respectively.\n self.embedding_size = 512\n self.num_lstm_units = 512\n\n # If < 1.0, the dropout keep probability applied to LSTM variables.\n self.lstm_dropout_keep_prob = 0.7","function_tokens":["def","__init__","(","self",")",":","# File pattern of sharded TFRecord file containing SequenceExample protos.","# Must be provided in training and evaluation modes.","self",".","input_file_pattern","=","None","# Image format (\"jpeg\" or \"png\").","self",".","image_format","=","\"jpeg\"","# Approximate number of values per input shard. Used to ensure sufficient","# mixing between shards in training.","self",".","values_per_input_shard","=","2300","# Minimum number of shards to keep in the input queue.","self",".","input_queue_capacity_factor","=","2","# Number of threads for prefetching SequenceExample protos.","self",".","num_input_reader_threads","=","1","# Name of the SequenceExample context feature containing image data.","self",".","image_feature_name","=","\"image\/data\"","# Name of the SequenceExample feature list containing integer captions.","self",".","caption_feature_name","=","\"image\/caption_ids\"","# Number of unique words in the vocab (plus 1, for ).","# The default value is larger than the expected actual vocab size to allow","# for differences between tokenizer versions used in preprocessing. There is","# no harm in using a value greater than the actual vocab size, but using a","# value less than the actual vocab size will result in an error.","self",".","vocab_size","=","20000","# Number of threads for image preprocessing. Should be a multiple of 2.","self",".","num_preprocess_threads","=","4","# Batch size.","self",".","batch_size","=","32","# File containing an Inception v3 checkpoint to initialize the variables","# of the Inception model. 
Must be provided when starting training for the","# first time.","self",".","inception_checkpoint_file","=","None","# Dimensions of Inception v3 input images.","self",".","image_height","=","299","self",".","image_width","=","299","# Scale used to initialize model variables.","self",".","initializer_scale","=","0.08","# LSTM input and output dimensionality, respectively.","self",".","embedding_size","=","512","self",".","num_lstm_units","=","512","# If < 1.0, the dropout keep probability applied to LSTM variables.","self",".","lstm_dropout_keep_prob","=","0.7"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/configuration.py#L26-L78"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/configuration.py","language":"python","identifier":"TrainingConfig.__init__","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Sets the default training hyperparameters.","docstring_summary":"Sets the default training hyperparameters.","docstring_tokens":["Sets","the","default","training","hyperparameters","."],"function":"def __init__(self):\n \"\"\"Sets the default training hyperparameters.\"\"\"\n # Number of examples per epoch of training data.\n self.num_examples_per_epoch = 586363\n\n # Optimizer for training the model.\n self.optimizer = \"SGD\"\n\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 2.0\n self.learning_rate_decay_factor = 0.5\n self.num_epochs_per_decay = 8.0\n\n # Learning rate when fine tuning the Inception v3 parameters.\n self.train_inception_learning_rate = 0.0005\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 5","function_tokens":["def","__init__","(","self",")",":","# Number of examples per epoch of training data.","self",".","num_examples_per_epoch","=","586363","# Optimizer for training the model.","self",".","optimizer","=","\"SGD\"","# Learning rate for the initial phase of training.","self",".","initial_learning_rate","=","2.0","self",".","learning_rate_decay_factor","=","0.5","self",".","num_epochs_per_decay","=","8.0","# Learning rate when fine tuning the Inception v3 parameters.","self",".","train_inception_learning_rate","=","0.0005","# If not None, clip gradients to this value.","self",".","clip_gradients","=","5.0","# How many model checkpoints to keep.","self",".","max_checkpoints_to_keep","=","5"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/configuration.py#L84-L104"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.__init__","parameters":"(self, config, mode, train_inception=False)","argument_list":"","return_statement":"","docstring":"Basic setup.\n\n Args:\n config: Object containing configuration parameters.\n mode: \"train\", \"eval\" or \"inference\".\n train_inception: Whether the inception submodel variables are trainable.","docstring_summary":"Basic setup.","docstring_tokens":["Basic","setup","."],"function":"def __init__(self, config, mode, train_inception=False):\n \"\"\"Basic setup.\n\n Args:\n config: Object containing 
configuration parameters.\n mode: \"train\", \"eval\" or \"inference\".\n train_inception: Whether the inception submodel variables are trainable.\n \"\"\"\n assert mode in [\"train\", \"eval\", \"inference\"]\n self.config = config\n self.mode = mode\n self.train_inception = train_inception\n\n # Reader for the input data.\n self.reader = tf.TFRecordReader()\n\n # To match the \"Show and Tell\" paper we initialize all variables with a\n # random uniform initializer.\n self.initializer = tf.random_uniform_initializer(\n minval=-self.config.initializer_scale,\n maxval=self.config.initializer_scale)\n\n # A float32 Tensor with shape [batch_size, height, width, channels].\n self.images = None\n\n # An int32 Tensor with shape [batch_size, padded_length].\n self.input_seqs = None\n\n # An int32 Tensor with shape [batch_size, padded_length].\n self.target_seqs = None\n\n # An int32 0\/1 Tensor with shape [batch_size, padded_length].\n self.input_mask = None\n\n # A float32 Tensor with shape [batch_size, embedding_size].\n self.image_embeddings = None\n\n # A float32 Tensor with shape [batch_size, padded_length, embedding_size].\n self.seq_embeddings = None\n\n # A float32 scalar Tensor; the total loss for the trainer to optimize.\n self.total_loss = None\n\n # A float32 Tensor with shape [batch_size * padded_length].\n self.target_cross_entropy_losses = None\n\n # A float32 Tensor with shape [batch_size * padded_length].\n self.target_cross_entropy_loss_weights = None\n\n # Collection of variables from the inception submodel.\n self.inception_variables = []\n\n # Function to restore the inception submodel from checkpoint.\n self.init_fn = None\n\n # Global step Tensor.\n self.global_step = None","function_tokens":["def","__init__","(","self",",","config",",","mode",",","train_inception","=","False",")",":","assert","mode","in","[","\"train\"",",","\"eval\"",",","\"inference\"","]","self",".","config","=","config","self",".","mode","=","mode","self",".","train_inception","=","train_inception","# Reader for the input data.","self",".","reader","=","tf",".","TFRecordReader","(",")","# To match the \"Show and Tell\" paper we initialize all variables with a","# random uniform initializer.","self",".","initializer","=","tf",".","random_uniform_initializer","(","minval","=","-","self",".","config",".","initializer_scale",",","maxval","=","self",".","config",".","initializer_scale",")","# A float32 Tensor with shape [batch_size, height, width, channels].","self",".","images","=","None","# An int32 Tensor with shape [batch_size, padded_length].","self",".","input_seqs","=","None","# An int32 Tensor with shape [batch_size, padded_length].","self",".","target_seqs","=","None","# An int32 0\/1 Tensor with shape [batch_size, padded_length].","self",".","input_mask","=","None","# A float32 Tensor with shape [batch_size, embedding_size].","self",".","image_embeddings","=","None","# A float32 Tensor with shape [batch_size, padded_length, embedding_size].","self",".","seq_embeddings","=","None","# A float32 scalar Tensor; the total loss for the trainer to optimize.","self",".","total_loss","=","None","# A float32 Tensor with shape [batch_size * padded_length].","self",".","target_cross_entropy_losses","=","None","# A float32 Tensor with shape [batch_size * padded_length].","self",".","target_cross_entropy_loss_weights","=","None","# Collection of variables from the inception submodel.","self",".","inception_variables","=","[","]","# Function to restore the inception submodel from 
checkpoint.","self",".","init_fn","=","None","# Global step Tensor.","self",".","global_step","=","None"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L41-L97"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.is_training","parameters":"(self)","argument_list":"","return_statement":"return self.mode == \"train\"","docstring":"Returns true if the model is built for training mode.","docstring_summary":"Returns true if the model is built for training mode.","docstring_tokens":["Returns","true","if","the","model","is","built","for","training","mode","."],"function":"def is_training(self):\n \"\"\"Returns true if the model is built for training mode.\"\"\"\n return self.mode == \"train\"","function_tokens":["def","is_training","(","self",")",":","return","self",".","mode","==","\"train\""],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L99-L101"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.process_image","parameters":"(self, encoded_image, thread_id=0)","argument_list":"","return_statement":"return image_processing.process_image(encoded_image,\n is_training=self.is_training(),\n height=self.config.image_height,\n width=self.config.image_width,\n thread_id=thread_id,\n image_format=self.config.image_format)","docstring":"Decodes and processes an image string.\n\n Args:\n encoded_image: A scalar string Tensor; the encoded image.\n thread_id: Preprocessing thread id used to select the ordering of color\n distortions.\n\n Returns:\n A float32 Tensor of shape [height, width, 3]; the processed image.","docstring_summary":"Decodes and processes an image string.","docstring_tokens":["Decodes","and","processes","an","image","string","."],"function":"def process_image(self, encoded_image, thread_id=0):\n \"\"\"Decodes and processes an image string.\n\n Args:\n encoded_image: A scalar string Tensor; the encoded image.\n thread_id: Preprocessing thread id used to select the ordering of color\n distortions.\n\n Returns:\n A float32 Tensor of shape [height, width, 3]; the processed image.\n \"\"\"\n return image_processing.process_image(encoded_image,\n is_training=self.is_training(),\n height=self.config.image_height,\n width=self.config.image_width,\n thread_id=thread_id,\n image_format=self.config.image_format)","function_tokens":["def","process_image","(","self",",","encoded_image",",","thread_id","=","0",")",":","return","image_processing",".","process_image","(","encoded_image",",","is_training","=","self",".","is_training","(",")",",","height","=","self",".","config",".","image_height",",","width","=","self",".","config",".","image_width",",","thread_id","=","thread_id",",","image_format","=","self",".","config",".","image_format",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L103-L119"} 
{"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.build_inputs","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Input prefetching, preprocessing and batching.\n\n Outputs:\n self.images\n self.input_seqs\n self.target_seqs (training and eval only)\n self.input_mask (training and eval only)","docstring_summary":"Input prefetching, preprocessing and batching.","docstring_tokens":["Input","prefetching","preprocessing","and","batching","."],"function":"def build_inputs(self):\n \"\"\"Input prefetching, preprocessing and batching.\n\n Outputs:\n self.images\n self.input_seqs\n self.target_seqs (training and eval only)\n self.input_mask (training and eval only)\n \"\"\"\n if self.mode == \"inference\":\n # In inference mode, images and inputs are fed via placeholders.\n image_feed = tf.placeholder(dtype=tf.string, shape=[], name=\"image_feed\")\n input_feed = tf.placeholder(dtype=tf.int64,\n shape=[None], # batch_size\n name=\"input_feed\")\n\n # Process image and insert batch dimensions.\n images = tf.expand_dims(self.process_image(image_feed), 0)\n input_seqs = tf.expand_dims(input_feed, 1)\n\n # No target sequences or input mask in inference mode.\n target_seqs = None\n input_mask = None\n else:\n # Prefetch serialized SequenceExample protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n is_training=self.is_training(),\n batch_size=self.config.batch_size,\n values_per_shard=self.config.values_per_input_shard,\n input_queue_capacity_factor=self.config.input_queue_capacity_factor,\n num_reader_threads=self.config.num_input_reader_threads)\n\n # Image processing and random distortion. 
Split across multiple threads\n # with each thread applying a slightly different distortion.\n assert self.config.num_preprocess_threads % 2 == 0\n images_and_captions = []\n for thread_id in range(self.config.num_preprocess_threads):\n serialized_sequence_example = input_queue.dequeue()\n encoded_image, caption = input_ops.parse_sequence_example(\n serialized_sequence_example,\n image_feature=self.config.image_feature_name,\n caption_feature=self.config.caption_feature_name)\n image = self.process_image(encoded_image, thread_id=thread_id)\n images_and_captions.append([image, caption])\n\n # Batch inputs.\n queue_capacity = (2 * self.config.num_preprocess_threads *\n self.config.batch_size)\n images, input_seqs, target_seqs, input_mask = (\n input_ops.batch_with_dynamic_pad(images_and_captions,\n batch_size=self.config.batch_size,\n queue_capacity=queue_capacity))\n\n self.images = images\n self.input_seqs = input_seqs\n self.target_seqs = target_seqs\n self.input_mask = input_mask","function_tokens":["def","build_inputs","(","self",")",":","if","self",".","mode","==","\"inference\"",":","# In inference mode, images and inputs are fed via placeholders.","image_feed","=","tf",".","placeholder","(","dtype","=","tf",".","string",",","shape","=","[","]",",","name","=","\"image_feed\"",")","input_feed","=","tf",".","placeholder","(","dtype","=","tf",".","int64",",","shape","=","[","None","]",",","# batch_size","name","=","\"input_feed\"",")","# Process image and insert batch dimensions.","images","=","tf",".","expand_dims","(","self",".","process_image","(","image_feed",")",",","0",")","input_seqs","=","tf",".","expand_dims","(","input_feed",",","1",")","# No target sequences or input mask in inference mode.","target_seqs","=","None","input_mask","=","None","else",":","# Prefetch serialized SequenceExample protos.","input_queue","=","input_ops",".","prefetch_input_data","(","self",".","reader",",","self",".","config",".","input_file_pattern",",","is_training","=","self",".","is_training","(",")",",","batch_size","=","self",".","config",".","batch_size",",","values_per_shard","=","self",".","config",".","values_per_input_shard",",","input_queue_capacity_factor","=","self",".","config",".","input_queue_capacity_factor",",","num_reader_threads","=","self",".","config",".","num_input_reader_threads",")","# Image processing and random distortion. 
Split across multiple threads","# with each thread applying a slightly different distortion.","assert","self",".","config",".","num_preprocess_threads","%","2","==","0","images_and_captions","=","[","]","for","thread_id","in","range","(","self",".","config",".","num_preprocess_threads",")",":","serialized_sequence_example","=","input_queue",".","dequeue","(",")","encoded_image",",","caption","=","input_ops",".","parse_sequence_example","(","serialized_sequence_example",",","image_feature","=","self",".","config",".","image_feature_name",",","caption_feature","=","self",".","config",".","caption_feature_name",")","image","=","self",".","process_image","(","encoded_image",",","thread_id","=","thread_id",")","images_and_captions",".","append","(","[","image",",","caption","]",")","# Batch inputs.","queue_capacity","=","(","2","*","self",".","config",".","num_preprocess_threads","*","self",".","config",".","batch_size",")","images",",","input_seqs",",","target_seqs",",","input_mask","=","(","input_ops",".","batch_with_dynamic_pad","(","images_and_captions",",","batch_size","=","self",".","config",".","batch_size",",","queue_capacity","=","queue_capacity",")",")","self",".","images","=","images","self",".","input_seqs","=","input_seqs","self",".","target_seqs","=","target_seqs","self",".","input_mask","=","input_mask"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L121-L179"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.build_image_embeddings","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Builds the image model subgraph and generates image embeddings.\n\n Inputs:\n self.images\n\n Outputs:\n self.image_embeddings","docstring_summary":"Builds the image model subgraph and generates image embeddings.","docstring_tokens":["Builds","the","image","model","subgraph","and","generates","image","embeddings","."],"function":"def build_image_embeddings(self):\n \"\"\"Builds the image model subgraph and generates image embeddings.\n\n Inputs:\n self.images\n\n Outputs:\n self.image_embeddings\n \"\"\"\n inception_output = image_embedding.inception_v3(\n self.images,\n trainable=self.train_inception,\n is_training=self.is_training())\n self.inception_variables = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES, scope=\"InceptionV3\")\n\n # Map inception output into embedding space.\n with tf.variable_scope(\"image_embedding\") as scope:\n image_embeddings = tf.contrib.layers.fully_connected(\n inputs=inception_output,\n num_outputs=self.config.embedding_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n biases_initializer=None,\n scope=scope)\n\n # Save the embedding size in the graph.\n tf.constant(self.config.embedding_size, name=\"embedding_size\")\n\n self.image_embeddings = image_embeddings","function_tokens":["def","build_image_embeddings","(","self",")",":","inception_output","=","image_embedding",".","inception_v3","(","self",".","images",",","trainable","=","self",".","train_inception",",","is_training","=","self",".","is_training","(",")",")","self",".","inception_variables","=","tf",".","get_collection","(","tf",".","GraphKeys",".","GLOBAL_VARIABLES",",","scope","=","\"InceptionV3\"",")","# Map inception output into embedding 
space.","with","tf",".","variable_scope","(","\"image_embedding\"",")","as","scope",":","image_embeddings","=","tf",".","contrib",".","layers",".","fully_connected","(","inputs","=","inception_output",",","num_outputs","=","self",".","config",".","embedding_size",",","activation_fn","=","None",",","weights_initializer","=","self",".","initializer",",","biases_initializer","=","None",",","scope","=","scope",")","# Save the embedding size in the graph.","tf",".","constant","(","self",".","config",".","embedding_size",",","name","=","\"embedding_size\"",")","self",".","image_embeddings","=","image_embeddings"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L181-L210"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.build_seq_embeddings","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Builds the input sequence embeddings.\n\n Inputs:\n self.input_seqs\n\n Outputs:\n self.seq_embeddings","docstring_summary":"Builds the input sequence embeddings.","docstring_tokens":["Builds","the","input","sequence","embeddings","."],"function":"def build_seq_embeddings(self):\n \"\"\"Builds the input sequence embeddings.\n\n Inputs:\n self.input_seqs\n\n Outputs:\n self.seq_embeddings\n \"\"\"\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"\/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.embedding_size],\n initializer=self.initializer)\n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs)\n\n self.seq_embeddings = seq_embeddings","function_tokens":["def","build_seq_embeddings","(","self",")",":","with","tf",".","variable_scope","(","\"seq_embedding\"",")",",","tf",".","device","(","\"\/cpu:0\"",")",":","embedding_map","=","tf",".","get_variable","(","name","=","\"map\"",",","shape","=","[","self",".","config",".","vocab_size",",","self",".","config",".","embedding_size","]",",","initializer","=","self",".","initializer",")","seq_embeddings","=","tf",".","nn",".","embedding_lookup","(","embedding_map",",","self",".","input_seqs",")","self",".","seq_embeddings","=","seq_embeddings"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L212-L228"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.build_model","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Builds the model.\n\n Inputs:\n self.image_embeddings\n self.seq_embeddings\n self.target_seqs (training and eval only)\n self.input_mask (training and eval only)\n\n Outputs:\n self.total_loss (training and eval only)\n self.target_cross_entropy_losses (training and eval only)\n self.target_cross_entropy_loss_weights (training and eval only)","docstring_summary":"Builds the model.","docstring_tokens":["Builds","the","model","."],"function":"def build_model(self):\n \"\"\"Builds the model.\n\n Inputs:\n self.image_embeddings\n self.seq_embeddings\n self.target_seqs (training and eval only)\n self.input_mask (training and eval 
only)\n\n Outputs:\n self.total_loss (training and eval only)\n self.target_cross_entropy_losses (training and eval only)\n self.target_cross_entropy_loss_weights (training and eval only)\n \"\"\"\n # This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the\n # modified LSTM in the \"Show and Tell\" paper has no biases and outputs\n # new_c * sigmoid(o).\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(\n num_units=self.config.num_lstm_units, state_is_tuple=True)\n if self.mode == \"train\":\n lstm_cell = tf.contrib.rnn.DropoutWrapper(\n lstm_cell,\n input_keep_prob=self.config.lstm_dropout_keep_prob,\n output_keep_prob=self.config.lstm_dropout_keep_prob)\n\n with tf.variable_scope(\"lstm\", initializer=self.initializer) as lstm_scope:\n # Feed the image embeddings to set the initial LSTM state.\n zero_state = lstm_cell.zero_state(\n batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)\n _, initial_state = lstm_cell(self.image_embeddings, zero_state)\n\n # Allow the LSTM variables to be reused.\n lstm_scope.reuse_variables()\n\n if self.mode == \"inference\":\n # In inference mode, use concatenated states for convenient feeding and\n # fetching.\n tf.concat(initial_state, 1, name=\"initial_state\")\n\n # Placeholder for feeding a batch of concatenated states.\n state_feed = tf.placeholder(dtype=tf.float32,\n shape=[None, sum(lstm_cell.state_size)],\n name=\"state_feed\")\n state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)\n\n # Run a single LSTM step.\n lstm_outputs, state_tuple = lstm_cell(\n inputs=tf.squeeze(self.seq_embeddings, squeeze_dims=[1]),\n state=state_tuple)\n\n # Concatentate the resulting state.\n tf.concat(state_tuple, 1, name=\"state\")\n else:\n # Run the batch of sequence embeddings through the LSTM.\n sequence_length = tf.reduce_sum(self.input_mask, 1)\n lstm_outputs, _ = tf.nn.dynamic_rnn(cell=lstm_cell,\n inputs=self.seq_embeddings,\n sequence_length=sequence_length,\n initial_state=initial_state,\n dtype=tf.float32,\n scope=lstm_scope)\n\n # Stack batches vertically.\n lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])\n\n with tf.variable_scope(\"logits\") as logits_scope:\n logits = tf.contrib.layers.fully_connected(\n inputs=lstm_outputs,\n num_outputs=self.config.vocab_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n scope=logits_scope)\n\n if self.mode == \"inference\":\n tf.nn.softmax(logits, name=\"softmax\")\n else:\n targets = tf.reshape(self.target_seqs, [-1])\n weights = tf.to_float(tf.reshape(self.input_mask, [-1]))\n\n # Compute losses.\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,\n logits=logits)\n batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)),\n tf.reduce_sum(weights),\n name=\"batch_loss\")\n tf.losses.add_loss(batch_loss)\n total_loss = tf.losses.get_total_loss()\n\n # Add summaries.\n tf.summary.scalar(\"losses\/batch_loss\", batch_loss)\n tf.summary.scalar(\"losses\/total_loss\", total_loss)\n for var in tf.trainable_variables():\n tf.summary.histogram(\"parameters\/\" + var.op.name, var)\n\n self.total_loss = total_loss\n self.target_cross_entropy_losses = losses # Used in evaluation.\n self.target_cross_entropy_loss_weights = weights","function_tokens":["def","build_model","(","self",")",":","# This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the","# modified LSTM in the \"Show and Tell\" paper has no biases and outputs","# new_c * 
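Before any words are processed, build_model pushes the image embedding through the LSTM cell once and keeps only the resulting state as the caption decoder's initial state. A sketch using tf.keras.layers.LSTMCell as a stand-in for the TF 1.x BasicLSTMCell; sizes are illustrative:

```python
import tensorflow as tf

num_units, batch = 8, 2
cell = tf.keras.layers.LSTMCell(num_units)   # stand-in for BasicLSTMCell
zero_state = [tf.zeros([batch, num_units]),  # Keras cells take [h, c]
              tf.zeros([batch, num_units])]

# One cell step on the image embedding seeds the caption state; the cell
# output itself is discarded, only the new state is kept.
image_embeddings = tf.random.normal([batch, num_units])
_, initial_state = cell(image_embeddings, zero_state)
```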
sigmoid(o).","lstm_cell","=","tf",".","contrib",".","rnn",".","BasicLSTMCell","(","num_units","=","self",".","config",".","num_lstm_units",",","state_is_tuple","=","True",")","if","self",".","mode","==","\"train\"",":","lstm_cell","=","tf",".","contrib",".","rnn",".","DropoutWrapper","(","lstm_cell",",","input_keep_prob","=","self",".","config",".","lstm_dropout_keep_prob",",","output_keep_prob","=","self",".","config",".","lstm_dropout_keep_prob",")","with","tf",".","variable_scope","(","\"lstm\"",",","initializer","=","self",".","initializer",")","as","lstm_scope",":","# Feed the image embeddings to set the initial LSTM state.","zero_state","=","lstm_cell",".","zero_state","(","batch_size","=","self",".","image_embeddings",".","get_shape","(",")","[","0","]",",","dtype","=","tf",".","float32",")","_",",","initial_state","=","lstm_cell","(","self",".","image_embeddings",",","zero_state",")","# Allow the LSTM variables to be reused.","lstm_scope",".","reuse_variables","(",")","if","self",".","mode","==","\"inference\"",":","# In inference mode, use concatenated states for convenient feeding and","# fetching.","tf",".","concat","(","initial_state",",","1",",","name","=","\"initial_state\"",")","# Placeholder for feeding a batch of concatenated states.","state_feed","=","tf",".","placeholder","(","dtype","=","tf",".","float32",",","shape","=","[","None",",","sum","(","lstm_cell",".","state_size",")","]",",","name","=","\"state_feed\"",")","state_tuple","=","tf",".","split","(","value","=","state_feed",",","num_or_size_splits","=","2",",","axis","=","1",")","# Run a single LSTM step.","lstm_outputs",",","state_tuple","=","lstm_cell","(","inputs","=","tf",".","squeeze","(","self",".","seq_embeddings",",","squeeze_dims","=","[","1","]",")",",","state","=","state_tuple",")","# Concatentate the resulting state.","tf",".","concat","(","state_tuple",",","1",",","name","=","\"state\"",")","else",":","# Run the batch of sequence embeddings through the LSTM.","sequence_length","=","tf",".","reduce_sum","(","self",".","input_mask",",","1",")","lstm_outputs",",","_","=","tf",".","nn",".","dynamic_rnn","(","cell","=","lstm_cell",",","inputs","=","self",".","seq_embeddings",",","sequence_length","=","sequence_length",",","initial_state","=","initial_state",",","dtype","=","tf",".","float32",",","scope","=","lstm_scope",")","# Stack batches vertically.","lstm_outputs","=","tf",".","reshape","(","lstm_outputs",",","[","-","1",",","lstm_cell",".","output_size","]",")","with","tf",".","variable_scope","(","\"logits\"",")","as","logits_scope",":","logits","=","tf",".","contrib",".","layers",".","fully_connected","(","inputs","=","lstm_outputs",",","num_outputs","=","self",".","config",".","vocab_size",",","activation_fn","=","None",",","weights_initializer","=","self",".","initializer",",","scope","=","logits_scope",")","if","self",".","mode","==","\"inference\"",":","tf",".","nn",".","softmax","(","logits",",","name","=","\"softmax\"",")","else",":","targets","=","tf",".","reshape","(","self",".","target_seqs",",","[","-","1","]",")","weights","=","tf",".","to_float","(","tf",".","reshape","(","self",".","input_mask",",","[","-","1","]",")",")","# Compute 
losses.","losses","=","tf",".","nn",".","sparse_softmax_cross_entropy_with_logits","(","labels","=","targets",",","logits","=","logits",")","batch_loss","=","tf",".","div","(","tf",".","reduce_sum","(","tf",".","multiply","(","losses",",","weights",")",")",",","tf",".","reduce_sum","(","weights",")",",","name","=","\"batch_loss\"",")","tf",".","losses",".","add_loss","(","batch_loss",")","total_loss","=","tf",".","losses",".","get_total_loss","(",")","# Add summaries.","tf",".","summary",".","scalar","(","\"losses\/batch_loss\"",",","batch_loss",")","tf",".","summary",".","scalar","(","\"losses\/total_loss\"",",","total_loss",")","for","var","in","tf",".","trainable_variables","(",")",":","tf",".","summary",".","histogram","(","\"parameters\/\"","+","var",".","op",".","name",",","var",")","self",".","total_loss","=","total_loss","self",".","target_cross_entropy_losses","=","losses","# Used in evaluation.","self",".","target_cross_entropy_loss_weights","=","weights"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L230-L326"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.setup_inception_initializer","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Sets up the function to restore inception variables from checkpoint.","docstring_summary":"Sets up the function to restore inception variables from checkpoint.","docstring_tokens":["Sets","up","the","function","to","restore","inception","variables","from","checkpoint","."],"function":"def setup_inception_initializer(self):\n \"\"\"Sets up the function to restore inception variables from checkpoint.\"\"\"\n if self.mode != \"inference\":\n # Restore inception variables only.\n saver = tf.train.Saver(self.inception_variables)\n\n def restore_fn(sess):\n tf.logging.info(\"Restoring Inception variables from checkpoint file %s\",\n self.config.inception_checkpoint_file)\n saver.restore(sess, self.config.inception_checkpoint_file)\n\n self.init_fn = restore_fn","function_tokens":["def","setup_inception_initializer","(","self",")",":","if","self",".","mode","!=","\"inference\"",":","# Restore inception variables only.","saver","=","tf",".","train",".","Saver","(","self",".","inception_variables",")","def","restore_fn","(","sess",")",":","tf",".","logging",".","info","(","\"Restoring Inception variables from checkpoint file %s\"",",","self",".","config",".","inception_checkpoint_file",")","saver",".","restore","(","sess",",","self",".","config",".","inception_checkpoint_file",")","self",".","init_fn","=","restore_fn"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L328-L339"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.setup_global_step","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Sets up the global step Tensor.","docstring_summary":"Sets up the global step Tensor.","docstring_tokens":["Sets","up","the","global","step","Tensor","."],"function":"def setup_global_step(self):\n \"\"\"Sets up the 
global step Tensor.\"\"\"\n global_step = tf.Variable(\n initial_value=0,\n name=\"global_step\",\n trainable=False,\n collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])\n\n self.global_step = global_step","function_tokens":["def","setup_global_step","(","self",")",":","global_step","=","tf",".","Variable","(","initial_value","=","0",",","name","=","\"global_step\"",",","trainable","=","False",",","collections","=","[","tf",".","GraphKeys",".","GLOBAL_STEP",",","tf",".","GraphKeys",".","GLOBAL_VARIABLES","]",")","self",".","global_step","=","global_step"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L341-L349"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py","language":"python","identifier":"ShowAndTellModel.build","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Creates all ops for training and evaluation.","docstring_summary":"Creates all ops for training and evaluation.","docstring_tokens":["Creates","all","ops","for","training","and","evaluation","."],"function":"def build(self):\n \"\"\"Creates all ops for training and evaluation.\"\"\"\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_model()\n self.setup_inception_initializer()\n self.setup_global_step()","function_tokens":["def","build","(","self",")",":","self",".","build_inputs","(",")","self",".","build_image_embeddings","(",")","self",".","build_seq_embeddings","(",")","self",".","build_model","(",")","self",".","setup_inception_initializer","(",")","self",".","setup_global_step","(",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/show_and_tell_model.py#L351-L358"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/image_embedding.py","language":"python","identifier":"inception_v3","parameters":"(images,\n trainable=True,\n is_training=True,\n weight_decay=0.00004,\n stddev=0.1,\n dropout_keep_prob=0.8,\n use_batch_norm=True,\n batch_norm_params=None,\n add_summaries=True,\n scope=\"InceptionV3\")","argument_list":"","return_statement":"return net","docstring":"Builds an Inception V3 subgraph for image embeddings.\n\n Args:\n images: A float32 Tensor of shape [batch, height, width, channels].\n trainable: Whether the inception submodel should be trainable or not.\n is_training: Boolean indicating training mode or not.\n weight_decay: Coefficient for weight regularization.\n stddev: The standard deviation of the trunctated normal weight initializer.\n dropout_keep_prob: Dropout keep probability.\n use_batch_norm: Whether to use batch normalization.\n batch_norm_params: Parameters for batch normalization. 
See\n tf.contrib.layers.batch_norm for details.\n add_summaries: Whether to add activation summaries.\n scope: Optional Variable scope.\n\n Returns:\n end_points: A dictionary of activations from inception_v3 layers.","docstring_summary":"Builds an Inception V3 subgraph for image embeddings.","docstring_tokens":["Builds","an","Inception","V3","subgraph","for","image","embeddings","."],"function":"def inception_v3(images,\n trainable=True,\n is_training=True,\n weight_decay=0.00004,\n stddev=0.1,\n dropout_keep_prob=0.8,\n use_batch_norm=True,\n batch_norm_params=None,\n add_summaries=True,\n scope=\"InceptionV3\"):\n \"\"\"Builds an Inception V3 subgraph for image embeddings.\n\n Args:\n images: A float32 Tensor of shape [batch, height, width, channels].\n trainable: Whether the inception submodel should be trainable or not.\n is_training: Boolean indicating training mode or not.\n weight_decay: Coefficient for weight regularization.\n stddev: The standard deviation of the trunctated normal weight initializer.\n dropout_keep_prob: Dropout keep probability.\n use_batch_norm: Whether to use batch normalization.\n batch_norm_params: Parameters for batch normalization. See\n tf.contrib.layers.batch_norm for details.\n add_summaries: Whether to add activation summaries.\n scope: Optional Variable scope.\n\n Returns:\n end_points: A dictionary of activations from inception_v3 layers.\n \"\"\"\n # Only consider the inception model to be in training mode if it's trainable.\n is_inception_model_training = trainable and is_training\n\n if use_batch_norm:\n # Default parameters for batch normalization.\n if not batch_norm_params:\n batch_norm_params = {\n \"is_training\": is_inception_model_training,\n \"trainable\": trainable,\n # Decay for the moving averages.\n \"decay\": 0.9997,\n # Epsilon to prevent 0s in variance.\n \"epsilon\": 0.001,\n # Collection containing the moving mean and moving variance.\n \"variables_collections\": {\n \"beta\": None,\n \"gamma\": None,\n \"moving_mean\": [\"moving_vars\"],\n \"moving_variance\": [\"moving_vars\"],\n }\n }\n else:\n batch_norm_params = None\n\n if trainable:\n weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)\n else:\n weights_regularizer = None\n\n with tf.variable_scope(scope, \"InceptionV3\", [images]) as scope:\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n weights_regularizer=weights_regularizer,\n trainable=trainable):\n with slim.arg_scope(\n [slim.conv2d],\n weights_initializer=tf.truncated_normal_initializer(stddev=stddev),\n activation_fn=tf.nn.relu,\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n net, end_points = inception_v3_base(images, scope=scope)\n with tf.variable_scope(\"logits\"):\n shape = net.get_shape()\n net = slim.avg_pool2d(net, shape[1:3], padding=\"VALID\", scope=\"pool\")\n net = slim.dropout(\n net,\n keep_prob=dropout_keep_prob,\n is_training=is_inception_model_training,\n scope=\"dropout\")\n net = slim.flatten(net, scope=\"flatten\")\n\n # Add summaries.\n if add_summaries:\n for v in end_points.values():\n tf.contrib.layers.summaries.summarize_activation(v)\n\n return net","function_tokens":["def","inception_v3","(","images",",","trainable","=","True",",","is_training","=","True",",","weight_decay","=","0.00004",",","stddev","=","0.1",",","dropout_keep_prob","=","0.8",",","use_batch_norm","=","True",",","batch_norm_params","=","None",",","add_summaries","=","True",",","scope","=","\"InceptionV3\"",")",":","# Only consider the inception model to be in 
training mode if it's trainable.","is_inception_model_training","=","trainable","and","is_training","if","use_batch_norm",":","# Default parameters for batch normalization.","if","not","batch_norm_params",":","batch_norm_params","=","{","\"is_training\"",":","is_inception_model_training",",","\"trainable\"",":","trainable",",","# Decay for the moving averages.","\"decay\"",":","0.9997",",","# Epsilon to prevent 0s in variance.","\"epsilon\"",":","0.001",",","# Collection containing the moving mean and moving variance.","\"variables_collections\"",":","{","\"beta\"",":","None",",","\"gamma\"",":","None",",","\"moving_mean\"",":","[","\"moving_vars\"","]",",","\"moving_variance\"",":","[","\"moving_vars\"","]",",","}","}","else",":","batch_norm_params","=","None","if","trainable",":","weights_regularizer","=","tf",".","contrib",".","layers",".","l2_regularizer","(","weight_decay",")","else",":","weights_regularizer","=","None","with","tf",".","variable_scope","(","scope",",","\"InceptionV3\"",",","[","images","]",")","as","scope",":","with","slim",".","arg_scope","(","[","slim",".","conv2d",",","slim",".","fully_connected","]",",","weights_regularizer","=","weights_regularizer",",","trainable","=","trainable",")",":","with","slim",".","arg_scope","(","[","slim",".","conv2d","]",",","weights_initializer","=","tf",".","truncated_normal_initializer","(","stddev","=","stddev",")",",","activation_fn","=","tf",".","nn",".","relu",",","normalizer_fn","=","slim",".","batch_norm",",","normalizer_params","=","batch_norm_params",")",":","net",",","end_points","=","inception_v3_base","(","images",",","scope","=","scope",")","with","tf",".","variable_scope","(","\"logits\"",")",":","shape","=","net",".","get_shape","(",")","net","=","slim",".","avg_pool2d","(","net",",","shape","[","1",":","3","]",",","padding","=","\"VALID\"",",","scope","=","\"pool\"",")","net","=","slim",".","dropout","(","net",",","keep_prob","=","dropout_keep_prob",",","is_training","=","is_inception_model_training",",","scope","=","\"dropout\"",")","net","=","slim",".","flatten","(","net",",","scope","=","\"flatten\"",")","# Add summaries.","if","add_summaries",":","for","v","in","end_points",".","values","(",")",":","tf",".","contrib",".","layers",".","summaries",".","summarize_activation","(","v",")","return","net"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/image_embedding.py#L30-L114"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/image_processing.py","language":"python","identifier":"distort_image","parameters":"(image, thread_id)","argument_list":"","return_statement":"return image","docstring":"Perform random distortions on an image.\n\n Args:\n image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).\n thread_id: Preprocessing thread id used to select the ordering of color\n distortions. 
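inception_v3 treats the CNN as being in training mode only when it is both trainable and the surrounding graph is training, which keeps the batch-norm moving averages frozen whenever the CNN is frozen. A pure-Python sketch of that gating, with the default batch-norm constants from above:

```python
# The CNN is "training" only when both flags hold; otherwise batch-norm
# statistics stay fixed. Constants mirror the defaults in inception_v3.
def inception_mode(trainable, is_training):
    is_cnn_training = trainable and is_training
    batch_norm_params = {
        "is_training": is_cnn_training,
        "trainable": trainable,
        "decay": 0.9997,
        "epsilon": 0.001,
    }
    return is_cnn_training, batch_norm_params

# A frozen CNN never enters training mode, even while the model trains.
assert inception_mode(trainable=False, is_training=True)[0] is False
```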
There should be a multiple of 2 preprocessing threads.\n\n Returns:\n distorted_image: A float32 Tensor of shape [height, width, 3] with values in\n [0, 1].","docstring_summary":"Perform random distortions on an image.","docstring_tokens":["Perform","random","distortions","on","an","image","."],"function":"def distort_image(image, thread_id):\n \"\"\"Perform random distortions on an image.\n\n Args:\n image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).\n thread_id: Preprocessing thread id used to select the ordering of color\n distortions. There should be a multiple of 2 preprocessing threads.\n\n Returns:\n distorted_image: A float32 Tensor of shape [height, width, 3] with values in\n [0, 1].\n \"\"\"\n # Randomly flip horizontally.\n with tf.name_scope(\"flip_horizontal\", values=[image]):\n image = tf.image.random_flip_left_right(image)\n\n # Randomly distort the colors based on thread id.\n color_ordering = thread_id % 2\n with tf.name_scope(\"distort_color\", values=[image]):\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=32. \/ 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.032)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n elif color_ordering == 1:\n image = tf.image.random_brightness(image, max_delta=32. \/ 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.032)\n\n # The random_* ops do not necessarily clamp.\n image = tf.clip_by_value(image, 0.0, 1.0)\n\n return image","function_tokens":["def","distort_image","(","image",",","thread_id",")",":","# Randomly flip horizontally.","with","tf",".","name_scope","(","\"flip_horizontal\"",",","values","=","[","image","]",")",":","image","=","tf",".","image",".","random_flip_left_right","(","image",")","# Randomly distort the colors based on thread id.","color_ordering","=","thread_id","%","2","with","tf",".","name_scope","(","\"distort_color\"",",","values","=","[","image","]",")",":","if","color_ordering","==","0",":","image","=","tf",".","image",".","random_brightness","(","image",",","max_delta","=","32.","\/","255.",")","image","=","tf",".","image",".","random_saturation","(","image",",","lower","=","0.5",",","upper","=","1.5",")","image","=","tf",".","image",".","random_hue","(","image",",","max_delta","=","0.032",")","image","=","tf",".","image",".","random_contrast","(","image",",","lower","=","0.5",",","upper","=","1.5",")","elif","color_ordering","==","1",":","image","=","tf",".","image",".","random_brightness","(","image",",","max_delta","=","32.","\/","255.",")","image","=","tf",".","image",".","random_contrast","(","image",",","lower","=","0.5",",","upper","=","1.5",")","image","=","tf",".","image",".","random_saturation","(","image",",","lower","=","0.5",",","upper","=","1.5",")","image","=","tf",".","image",".","random_hue","(","image",",","max_delta","=","0.032",")","# The random_* ops do not necessarily clamp.","image","=","tf",".","clip_by_value","(","image",",","0.0",",","1.0",")","return","image"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/image_processing.py#L26-L59"} 
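distort_image picks one of two color-distortion orderings by thread id and clips afterwards, since the random_* ops can push values outside [0, 1]. A runnable sketch on a toy image:

```python
import tensorflow as tf

image = tf.random.uniform([224, 224, 3])           # toy image in [0, 1)
image = tf.image.random_flip_left_right(image)

for thread_id in (0, 1):                           # the two color orderings
    if thread_id % 2 == 0:
        out = tf.image.random_brightness(image, max_delta=32. / 255.)
        out = tf.image.random_saturation(out, lower=0.5, upper=1.5)
    else:
        out = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        out = tf.image.random_hue(out, max_delta=0.032)
    out = tf.clip_by_value(out, 0.0, 1.0)          # random_* ops may overshoot
```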
{"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/image_processing.py","language":"python","identifier":"process_image","parameters":"(encoded_image,\n is_training,\n height,\n width,\n resize_height=346,\n resize_width=346,\n thread_id=0,\n image_format=\"jpeg\")","argument_list":"","return_statement":"return image","docstring":"Decode an image, resize and apply random distortions.\n\n In training, images are distorted slightly differently depending on thread_id.\n\n Args:\n encoded_image: String Tensor containing the image.\n is_training: Boolean; whether preprocessing for training or eval.\n height: Height of the output image.\n width: Width of the output image.\n resize_height: If > 0, resize height before crop to final dimensions.\n resize_width: If > 0, resize width before crop to final dimensions.\n thread_id: Preprocessing thread id used to select the ordering of color\n distortions. There should be a multiple of 2 preprocessing threads.\n image_format: \"jpeg\" or \"png\".\n\n Returns:\n A float32 Tensor of shape [height, width, 3] with values in [-1, 1].\n\n Raises:\n ValueError: If image_format is invalid.","docstring_summary":"Decode an image, resize and apply random distortions.","docstring_tokens":["Decode","an","image","resize","and","apply","random","distortions","."],"function":"def process_image(encoded_image,\n is_training,\n height,\n width,\n resize_height=346,\n resize_width=346,\n thread_id=0,\n image_format=\"jpeg\"):\n \"\"\"Decode an image, resize and apply random distortions.\n\n In training, images are distorted slightly differently depending on thread_id.\n\n Args:\n encoded_image: String Tensor containing the image.\n is_training: Boolean; whether preprocessing for training or eval.\n height: Height of the output image.\n width: Width of the output image.\n resize_height: If > 0, resize height before crop to final dimensions.\n resize_width: If > 0, resize width before crop to final dimensions.\n thread_id: Preprocessing thread id used to select the ordering of color\n distortions. There should be a multiple of 2 preprocessing threads.\n image_format: \"jpeg\" or \"png\".\n\n Returns:\n A float32 Tensor of shape [height, width, 3] with values in [-1, 1].\n\n Raises:\n ValueError: If image_format is invalid.\n \"\"\"\n # Helper function to log an image summary to the visualizer. 
Summaries are\n # only logged in thread 0.\n def image_summary(name, image):\n if not thread_id:\n tf.summary.image(name, tf.expand_dims(image, 0))\n\n # Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1).\n with tf.name_scope(\"decode\", values=[encoded_image]):\n if image_format == \"jpeg\":\n image = tf.image.decode_jpeg(encoded_image, channels=3)\n elif image_format == \"png\":\n image = tf.image.decode_png(encoded_image, channels=3)\n else:\n raise ValueError(\"Invalid image format: %s\" % image_format)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image_summary(\"original_image\", image)\n\n # Resize image.\n assert (resize_height > 0) == (resize_width > 0)\n if resize_height:\n image = tf.image.resize_images(image,\n size=[resize_height, resize_width],\n method=tf.image.ResizeMethod.BILINEAR)\n\n # Crop to final dimensions.\n if is_training:\n image = tf.random_crop(image, [height, width, 3])\n else:\n # Central crop, assuming resize_height > height, resize_width > width.\n image = tf.image.resize_image_with_crop_or_pad(image, height, width)\n\n image_summary(\"resized_image\", image)\n\n # Randomly distort the image.\n if is_training:\n image = distort_image(image, thread_id)\n\n image_summary(\"final_image\", image)\n\n # Rescale to [-1,1] instead of [0, 1]\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n return image","function_tokens":["def","process_image","(","encoded_image",",","is_training",",","height",",","width",",","resize_height","=","346",",","resize_width","=","346",",","thread_id","=","0",",","image_format","=","\"jpeg\"",")",":","# Helper function to log an image summary to the visualizer. Summaries are","# only logged in thread 0.","def","image_summary","(","name",",","image",")",":","if","not","thread_id",":","tf",".","summary",".","image","(","name",",","tf",".","expand_dims","(","image",",","0",")",")","# Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1).","with","tf",".","name_scope","(","\"decode\"",",","values","=","[","encoded_image","]",")",":","if","image_format","==","\"jpeg\"",":","image","=","tf",".","image",".","decode_jpeg","(","encoded_image",",","channels","=","3",")","elif","image_format","==","\"png\"",":","image","=","tf",".","image",".","decode_png","(","encoded_image",",","channels","=","3",")","else",":","raise","ValueError","(","\"Invalid image format: %s\"","%","image_format",")","image","=","tf",".","image",".","convert_image_dtype","(","image",",","dtype","=","tf",".","float32",")","image_summary","(","\"original_image\"",",","image",")","# Resize image.","assert","(","resize_height",">","0",")","==","(","resize_width",">","0",")","if","resize_height",":","image","=","tf",".","image",".","resize_images","(","image",",","size","=","[","resize_height",",","resize_width","]",",","method","=","tf",".","image",".","ResizeMethod",".","BILINEAR",")","# Crop to final dimensions.","if","is_training",":","image","=","tf",".","random_crop","(","image",",","[","height",",","width",",","3","]",")","else",":","# Central crop, assuming resize_height > height, resize_width > width.","image","=","tf",".","image",".","resize_image_with_crop_or_pad","(","image",",","height",",","width",")","image_summary","(","\"resized_image\"",",","image",")","# Randomly distort the image.","if","is_training",":","image","=","distort_image","(","image",",","thread_id",")","image_summary","(","\"final_image\"",",","image",")","# Rescale to [-1,1] instead of [0, 
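process_image resizes to a fixed intermediate size, crops to the final network input (randomly in training, centrally in eval), and rescales from [0, 1] to [-1, 1]. A sketch of that geometry in TF 2.x idiom; the 299x299 final size is an assumption following the Inception V3 convention:

```python
import tensorflow as tf

image = tf.random.uniform([400, 500, 3])           # decoded float image, [0, 1)
image = tf.image.resize(image, [346, 346], method="bilinear")
# Eval-style central crop; training would use tf.image.random_crop instead.
image = tf.image.resize_with_crop_or_pad(image, 299, 299)
image = (image - 0.5) * 2.0                        # rescale [0, 1] -> [-1, 1]
```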
1]","image","=","tf",".","subtract","(","image",",","0.5",")","image","=","tf",".","multiply","(","image",",","2.0",")","return","image"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/image_processing.py#L62-L133"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/inputs.py","language":"python","identifier":"parse_sequence_example","parameters":"(serialized, image_feature, caption_feature)","argument_list":"","return_statement":"return encoded_image, caption","docstring":"Parses a tensorflow.SequenceExample into an image and caption.\n\n Args:\n serialized: A scalar string Tensor; a single serialized SequenceExample.\n image_feature: Name of SequenceExample context feature containing image\n data.\n caption_feature: Name of SequenceExample feature list containing integer\n captions.\n\n Returns:\n encoded_image: A scalar string Tensor containing a JPEG encoded image.\n caption: A 1-D uint64 Tensor with dynamically specified length.","docstring_summary":"Parses a tensorflow.SequenceExample into an image and caption.","docstring_tokens":["Parses","a","tensorflow",".","SequenceExample","into","an","image","and","caption","."],"function":"def parse_sequence_example(serialized, image_feature, caption_feature):\n \"\"\"Parses a tensorflow.SequenceExample into an image and caption.\n\n Args:\n serialized: A scalar string Tensor; a single serialized SequenceExample.\n image_feature: Name of SequenceExample context feature containing image\n data.\n caption_feature: Name of SequenceExample feature list containing integer\n captions.\n\n Returns:\n encoded_image: A scalar string Tensor containing a JPEG encoded image.\n caption: A 1-D uint64 Tensor with dynamically specified length.\n \"\"\"\n context, sequence = tf.parse_single_sequence_example(\n serialized,\n context_features={\n image_feature: tf.FixedLenFeature([], dtype=tf.string)\n },\n sequence_features={\n caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n })\n\n encoded_image = context[image_feature]\n caption = sequence[caption_feature]\n return encoded_image, caption","function_tokens":["def","parse_sequence_example","(","serialized",",","image_feature",",","caption_feature",")",":","context",",","sequence","=","tf",".","parse_single_sequence_example","(","serialized",",","context_features","=","{","image_feature",":","tf",".","FixedLenFeature","(","[","]",",","dtype","=","tf",".","string",")","}",",","sequence_features","=","{","caption_feature",":","tf",".","FixedLenSequenceFeature","(","[","]",",","dtype","=","tf",".","int64",")",",","}",")","encoded_image","=","context","[","image_feature","]","caption","=","sequence","[","caption_feature","]","return","encoded_image",",","caption"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/inputs.py#L26-L51"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/inputs.py","language":"python","identifier":"prefetch_input_data","parameters":"(reader,\n file_pattern,\n is_training,\n batch_size,\n values_per_shard,\n input_queue_capacity_factor=16,\n num_reader_threads=1,\n shard_queue_name=\"filename_queue\",\n 
value_queue_name=\"input_queue\")","argument_list":"","return_statement":"return values_queue","docstring":"Prefetches string values from disk into an input queue.\n\n In training the capacity of the queue is important because a larger queue\n means better mixing of training examples between shards. The minimum number of\n values kept in the queue is values_per_shard * input_queue_capacity_factor,\n where input_queue_memory factor should be chosen to trade-off better mixing\n with memory usage.\n\n Args:\n reader: Instance of tf.ReaderBase.\n file_pattern: Comma-separated list of file patterns (e.g.\n \/tmp\/train_data-?????-of-00100).\n is_training: Boolean; whether prefetching for training or eval.\n batch_size: Model batch size used to determine queue capacity.\n values_per_shard: Approximate number of values per shard.\n input_queue_capacity_factor: Minimum number of values to keep in the queue\n in multiples of values_per_shard. See comments above.\n num_reader_threads: Number of reader threads to fill the queue.\n shard_queue_name: Name for the shards filename queue.\n value_queue_name: Name for the values input queue.\n\n Returns:\n A Queue containing prefetched string values.","docstring_summary":"Prefetches string values from disk into an input queue.","docstring_tokens":["Prefetches","string","values","from","disk","into","an","input","queue","."],"function":"def prefetch_input_data(reader,\n file_pattern,\n is_training,\n batch_size,\n values_per_shard,\n input_queue_capacity_factor=16,\n num_reader_threads=1,\n shard_queue_name=\"filename_queue\",\n value_queue_name=\"input_queue\"):\n \"\"\"Prefetches string values from disk into an input queue.\n\n In training the capacity of the queue is important because a larger queue\n means better mixing of training examples between shards. The minimum number of\n values kept in the queue is values_per_shard * input_queue_capacity_factor,\n where input_queue_memory factor should be chosen to trade-off better mixing\n with memory usage.\n\n Args:\n reader: Instance of tf.ReaderBase.\n file_pattern: Comma-separated list of file patterns (e.g.\n \/tmp\/train_data-?????-of-00100).\n is_training: Boolean; whether prefetching for training or eval.\n batch_size: Model batch size used to determine queue capacity.\n values_per_shard: Approximate number of values per shard.\n input_queue_capacity_factor: Minimum number of values to keep in the queue\n in multiples of values_per_shard. 
See comments above.\n num_reader_threads: Number of reader threads to fill the queue.\n shard_queue_name: Name for the shards filename queue.\n value_queue_name: Name for the values input queue.\n\n Returns:\n A Queue containing prefetched string values.\n \"\"\"\n data_files = []\n for pattern in file_pattern.split(\",\"):\n data_files.extend(tf.gfile.Glob(pattern))\n if not data_files:\n tf.logging.fatal(\"Found no input files matching %s\", file_pattern)\n else:\n tf.logging.info(\"Prefetching values from %d files matching %s\",\n len(data_files), file_pattern)\n\n if is_training:\n filename_queue = tf.train.string_input_producer(\n data_files, shuffle=True, capacity=16, name=shard_queue_name)\n min_queue_examples = values_per_shard * input_queue_capacity_factor\n capacity = min_queue_examples + 100 * batch_size\n values_queue = tf.RandomShuffleQueue(\n capacity=capacity,\n min_after_dequeue=min_queue_examples,\n dtypes=[tf.string],\n name=\"random_\" + value_queue_name)\n else:\n filename_queue = tf.train.string_input_producer(\n data_files, shuffle=False, capacity=1, name=shard_queue_name)\n capacity = values_per_shard + 3 * batch_size\n values_queue = tf.FIFOQueue(\n capacity=capacity, dtypes=[tf.string], name=\"fifo_\" + value_queue_name)\n\n enqueue_ops = []\n for _ in range(num_reader_threads):\n _, value = reader.read(filename_queue)\n enqueue_ops.append(values_queue.enqueue([value]))\n tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(\n values_queue, enqueue_ops))\n tf.summary.scalar(\n \"queue\/%s\/fraction_of_%d_full\" % (values_queue.name, capacity),\n tf.cast(values_queue.size(), tf.float32) * (1. \/ capacity))\n\n return values_queue","function_tokens":["def","prefetch_input_data","(","reader",",","file_pattern",",","is_training",",","batch_size",",","values_per_shard",",","input_queue_capacity_factor","=","16",",","num_reader_threads","=","1",",","shard_queue_name","=","\"filename_queue\"",",","value_queue_name","=","\"input_queue\"",")",":","data_files","=","[","]","for","pattern","in","file_pattern",".","split","(","\",\"",")",":","data_files",".","extend","(","tf",".","gfile",".","Glob","(","pattern",")",")","if","not","data_files",":","tf",".","logging",".","fatal","(","\"Found no input files matching %s\"",",","file_pattern",")","else",":","tf",".","logging",".","info","(","\"Prefetching values from %d files matching 
%s\"",",","len","(","data_files",")",",","file_pattern",")","if","is_training",":","filename_queue","=","tf",".","train",".","string_input_producer","(","data_files",",","shuffle","=","True",",","capacity","=","16",",","name","=","shard_queue_name",")","min_queue_examples","=","values_per_shard","*","input_queue_capacity_factor","capacity","=","min_queue_examples","+","100","*","batch_size","values_queue","=","tf",".","RandomShuffleQueue","(","capacity","=","capacity",",","min_after_dequeue","=","min_queue_examples",",","dtypes","=","[","tf",".","string","]",",","name","=","\"random_\"","+","value_queue_name",")","else",":","filename_queue","=","tf",".","train",".","string_input_producer","(","data_files",",","shuffle","=","False",",","capacity","=","1",",","name","=","shard_queue_name",")","capacity","=","values_per_shard","+","3","*","batch_size","values_queue","=","tf",".","FIFOQueue","(","capacity","=","capacity",",","dtypes","=","[","tf",".","string","]",",","name","=","\"fifo_\"","+","value_queue_name",")","enqueue_ops","=","[","]","for","_","in","range","(","num_reader_threads",")",":","_",",","value","=","reader",".","read","(","filename_queue",")","enqueue_ops",".","append","(","values_queue",".","enqueue","(","[","value","]",")",")","tf",".","train",".","queue_runner",".","add_queue_runner","(","tf",".","train",".","queue_runner",".","QueueRunner","(","values_queue",",","enqueue_ops",")",")","tf",".","summary",".","scalar","(","\"queue\/%s\/fraction_of_%d_full\"","%","(","values_queue",".","name",",","capacity",")",",","tf",".","cast","(","values_queue",".","size","(",")",",","tf",".","float32",")","*","(","1.","\/","capacity",")",")","return","values_queue"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/inputs.py#L54-L123"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/inputs.py","language":"python","identifier":"batch_with_dynamic_pad","parameters":"(images_and_captions,\n batch_size,\n queue_capacity,\n add_summaries=True)","argument_list":"","return_statement":"return images, input_seqs, target_seqs, mask","docstring":"Batches input images and captions.\n\n This function splits the caption into an input sequence and a target sequence,\n where the target sequence is the input sequence right-shifted by 1. Input and\n target sequences are batched and padded up to the maximum length of sequences\n in the batch. A mask is created to distinguish real words from padding words.\n\n Example:\n Actual captions in the batch ('-' denotes padded character):\n [\n [ 1 2 5 4 5 ],\n [ 1 2 3 4 - ],\n [ 1 2 3 - - ],\n ]\n\n input_seqs:\n [\n [ 1 2 3 4 ],\n [ 1 2 3 - ],\n [ 1 2 - - ],\n ]\n\n target_seqs:\n [\n [ 2 3 4 5 ],\n [ 2 3 4 - ],\n [ 2 3 - - ],\n ]\n\n mask:\n [\n [ 1 1 1 1 ],\n [ 1 1 1 0 ],\n [ 1 1 0 0 ],\n ]\n\n Args:\n images_and_captions: A list of pairs [image, caption], where image is a\n Tensor of shape [height, width, channels] and caption is a 1-D Tensor of\n any length. 
Each pair will be processed and added to the queue in a\n separate thread.\n batch_size: Batch size.\n queue_capacity: Queue capacity.\n add_summaries: If true, add caption length summaries.\n\n Returns:\n images: A Tensor of shape [batch_size, height, width, channels].\n input_seqs: An int32 Tensor of shape [batch_size, padded_length].\n target_seqs: An int32 Tensor of shape [batch_size, padded_length].\n mask: An int32 0\/1 Tensor of shape [batch_size, padded_length].","docstring_summary":"Batches input images and captions.","docstring_tokens":["Batches","input","images","and","captions","."],"function":"def batch_with_dynamic_pad(images_and_captions,\n batch_size,\n queue_capacity,\n add_summaries=True):\n \"\"\"Batches input images and captions.\n\n This function splits the caption into an input sequence and a target sequence,\n where the target sequence is the input sequence right-shifted by 1. Input and\n target sequences are batched and padded up to the maximum length of sequences\n in the batch. A mask is created to distinguish real words from padding words.\n\n Example:\n Actual captions in the batch ('-' denotes padded character):\n [\n [ 1 2 5 4 5 ],\n [ 1 2 3 4 - ],\n [ 1 2 3 - - ],\n ]\n\n input_seqs:\n [\n [ 1 2 3 4 ],\n [ 1 2 3 - ],\n [ 1 2 - - ],\n ]\n\n target_seqs:\n [\n [ 2 3 4 5 ],\n [ 2 3 4 - ],\n [ 2 3 - - ],\n ]\n\n mask:\n [\n [ 1 1 1 1 ],\n [ 1 1 1 0 ],\n [ 1 1 0 0 ],\n ]\n\n Args:\n images_and_captions: A list of pairs [image, caption], where image is a\n Tensor of shape [height, width, channels] and caption is a 1-D Tensor of\n any length. Each pair will be processed and added to the queue in a\n separate thread.\n batch_size: Batch size.\n queue_capacity: Queue capacity.\n add_summaries: If true, add caption length summaries.\n\n Returns:\n images: A Tensor of shape [batch_size, height, width, channels].\n input_seqs: An int32 Tensor of shape [batch_size, padded_length].\n target_seqs: An int32 Tensor of shape [batch_size, padded_length].\n mask: An int32 0\/1 Tensor of shape [batch_size, padded_length].\n \"\"\"\n enqueue_list = []\n for image, caption in images_and_captions:\n caption_length = tf.shape(caption)[0]\n input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)\n\n input_seq = tf.slice(caption, [0], input_length)\n target_seq = tf.slice(caption, [1], input_length)\n indicator = tf.ones(input_length, dtype=tf.int32)\n enqueue_list.append([image, input_seq, target_seq, indicator])\n\n images, input_seqs, target_seqs, mask = tf.train.batch_join(\n enqueue_list,\n batch_size=batch_size,\n capacity=queue_capacity,\n dynamic_pad=True,\n name=\"batch_and_pad\")\n\n if add_summaries:\n lengths = tf.add(tf.reduce_sum(mask, 1), 1)\n tf.summary.scalar(\"caption_length\/batch_min\", tf.reduce_min(lengths))\n tf.summary.scalar(\"caption_length\/batch_max\", tf.reduce_max(lengths))\n tf.summary.scalar(\"caption_length\/batch_mean\", tf.reduce_mean(lengths))\n\n return images, input_seqs, target_seqs, 
mask","function_tokens":["def","batch_with_dynamic_pad","(","images_and_captions",",","batch_size",",","queue_capacity",",","add_summaries","=","True",")",":","enqueue_list","=","[","]","for","image",",","caption","in","images_and_captions",":","caption_length","=","tf",".","shape","(","caption",")","[","0","]","input_length","=","tf",".","expand_dims","(","tf",".","subtract","(","caption_length",",","1",")",",","0",")","input_seq","=","tf",".","slice","(","caption",",","[","0","]",",","input_length",")","target_seq","=","tf",".","slice","(","caption",",","[","1","]",",","input_length",")","indicator","=","tf",".","ones","(","input_length",",","dtype","=","tf",".","int32",")","enqueue_list",".","append","(","[","image",",","input_seq",",","target_seq",",","indicator","]",")","images",",","input_seqs",",","target_seqs",",","mask","=","tf",".","train",".","batch_join","(","enqueue_list",",","batch_size","=","batch_size",",","capacity","=","queue_capacity",",","dynamic_pad","=","True",",","name","=","\"batch_and_pad\"",")","if","add_summaries",":","lengths","=","tf",".","add","(","tf",".","reduce_sum","(","mask",",","1",")",",","1",")","tf",".","summary",".","scalar","(","\"caption_length\/batch_min\"",",","tf",".","reduce_min","(","lengths",")",")","tf",".","summary",".","scalar","(","\"caption_length\/batch_max\"",",","tf",".","reduce_max","(","lengths",")",")","tf",".","summary",".","scalar","(","\"caption_length\/batch_mean\"",",","tf",".","reduce_mean","(","lengths",")",")","return","images",",","input_seqs",",","target_seqs",",","mask"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/ops\/inputs.py#L126-L204"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/vocabulary.py","language":"python","identifier":"Vocabulary.__init__","parameters":"(self,\n vocab_file,\n start_word=\"\",\n end_word=\"<\/S>\",\n unk_word=\"\")","argument_list":"","return_statement":"","docstring":"Initializes the vocabulary.\n\n Args:\n vocab_file: File containing the vocabulary, where the words are the first\n whitespace-separated token on each line (other tokens are ignored) and\n the word ids are the corresponding line numbers.\n start_word: Special word denoting sentence start.\n end_word: Special word denoting sentence end.\n unk_word: Special word denoting unknown words.","docstring_summary":"Initializes the vocabulary.","docstring_tokens":["Initializes","the","vocabulary","."],"function":"def __init__(self,\n vocab_file,\n start_word=\"\",\n end_word=\"<\/S>\",\n unk_word=\"\"):\n \"\"\"Initializes the vocabulary.\n\n Args:\n vocab_file: File containing the vocabulary, where the words are the first\n whitespace-separated token on each line (other tokens are ignored) and\n the word ids are the corresponding line numbers.\n start_word: Special word denoting sentence start.\n end_word: Special word denoting sentence end.\n unk_word: Special word denoting unknown words.\n \"\"\"\n if not tf.gfile.Exists(vocab_file):\n tf.logging.fatal(\"Vocab file %s not found.\", vocab_file)\n tf.logging.info(\"Initializing vocabulary from file: %s\", vocab_file)\n\n with tf.gfile.GFile(vocab_file, mode=\"r\") as f:\n reverse_vocab = list(f.readlines())\n reverse_vocab = [line.split()[0] for line in reverse_vocab]\n assert start_word in reverse_vocab\n assert end_word in reverse_vocab\n if unk_word not in 
reverse_vocab:\n reverse_vocab.append(unk_word)\n vocab = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n\n tf.logging.info(\"Created vocabulary with %d words\" % len(vocab))\n\n self.vocab = vocab # vocab[word] = id\n self.reverse_vocab = reverse_vocab # reverse_vocab[id] = word\n\n # Save special word ids.\n self.start_id = vocab[start_word]\n self.end_id = vocab[end_word]\n self.unk_id = vocab[unk_word]","function_tokens":["def","__init__","(","self",",","vocab_file",",","start_word","=","\"\"",",","end_word","=","\"<\/S>\"",",","unk_word","=","\"\"",")",":","if","not","tf",".","gfile",".","Exists","(","vocab_file",")",":","tf",".","logging",".","fatal","(","\"Vocab file %s not found.\"",",","vocab_file",")","tf",".","logging",".","info","(","\"Initializing vocabulary from file: %s\"",",","vocab_file",")","with","tf",".","gfile",".","GFile","(","vocab_file",",","mode","=","\"r\"",")","as","f",":","reverse_vocab","=","list","(","f",".","readlines","(",")",")","reverse_vocab","=","[","line",".","split","(",")","[","0","]","for","line","in","reverse_vocab","]","assert","start_word","in","reverse_vocab","assert","end_word","in","reverse_vocab","if","unk_word","not","in","reverse_vocab",":","reverse_vocab",".","append","(","unk_word",")","vocab","=","dict","(","[","(","x",",","y",")","for","(","y",",","x",")","in","enumerate","(","reverse_vocab",")","]",")","tf",".","logging",".","info","(","\"Created vocabulary with %d words\"","%","len","(","vocab",")",")","self",".","vocab","=","vocab","# vocab[word] = id","self",".","reverse_vocab","=","reverse_vocab","# reverse_vocab[id] = word","# Save special word ids.","self",".","start_id","=","vocab","[","start_word","]","self",".","end_id","=","vocab","[","end_word","]","self",".","unk_id","=","vocab","[","unk_word","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/vocabulary.py#L28-L64"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/vocabulary.py","language":"python","identifier":"Vocabulary.word_to_id","parameters":"(self, word)","argument_list":"","return_statement":"","docstring":"Returns the integer word id of a word string.","docstring_summary":"Returns the integer word id of a word string.","docstring_tokens":["Returns","the","integer","word","id","of","a","word","string","."],"function":"def word_to_id(self, word):\n \"\"\"Returns the integer word id of a word string.\"\"\"\n if word in self.vocab:\n return self.vocab[word]\n else:\n return self.unk_id","function_tokens":["def","word_to_id","(","self",",","word",")",":","if","word","in","self",".","vocab",":","return","self",".","vocab","[","word","]","else",":","return","self",".","unk_id"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/vocabulary.py#L66-L71"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/vocabulary.py","language":"python","identifier":"Vocabulary.id_to_word","parameters":"(self, word_id)","argument_list":"","return_statement":"","docstring":"Returns the word string of an integer word id.","docstring_summary":"Returns the word string of an integer word 
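The Vocabulary class keeps a word-to-id dict and its id-to-word inverse, routing unknown words and out-of-range ids to the unknown token. A tiny sketch with a toy word list; the "<S>"/"</S>"/"<UNK>" tokens are assumptions following the conventional defaults:

```python
# Toy vocabulary: line number doubles as word id, as in the file-based loader.
reverse_vocab = ["<S>", "</S>", "a", "dog", "<UNK>"]
vocab = {word: idx for idx, word in enumerate(reverse_vocab)}
unk_id = vocab["<UNK>"]

def word_to_id(word):
    return vocab.get(word, unk_id)       # unknown words -> <UNK> id

def id_to_word(word_id):
    if word_id >= len(reverse_vocab):    # out-of-range ids -> <UNK>
        return reverse_vocab[unk_id]
    return reverse_vocab[word_id]

assert word_to_id("dog") == 3 and word_to_id("zebra") == unk_id
```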
id.","docstring_tokens":["Returns","the","word","string","of","an","integer","word","id","."],"function":"def id_to_word(self, word_id):\n \"\"\"Returns the word string of an integer word id.\"\"\"\n if word_id >= len(self.reverse_vocab):\n return self.reverse_vocab[self.unk_id]\n else:\n return self.reverse_vocab[word_id]","function_tokens":["def","id_to_word","(","self",",","word_id",")",":","if","word_id",">=","len","(","self",".","reverse_vocab",")",":","return","self",".","reverse_vocab","[","self",".","unk_id","]","else",":","return","self",".","reverse_vocab","[","word_id","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/vocabulary.py#L73-L78"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py","language":"python","identifier":"InferenceWrapperBase.build_model","parameters":"(self, model_config)","argument_list":"","return_statement":"","docstring":"Builds the model for inference.\n\n Args:\n model_config: Object containing configuration for building the model.\n\n Returns:\n model: The model object.","docstring_summary":"Builds the model for inference.","docstring_tokens":["Builds","the","model","for","inference","."],"function":"def build_model(self, model_config):\n \"\"\"Builds the model for inference.\n\n Args:\n model_config: Object containing configuration for building the model.\n\n Returns:\n model: The model object.\n \"\"\"\n tf.logging.fatal(\"Please implement build_model in subclass\")","function_tokens":["def","build_model","(","self",",","model_config",")",":","tf",".","logging",".","fatal","(","\"Please implement build_model in subclass\"",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py#L62-L71"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py","language":"python","identifier":"InferenceWrapperBase._create_restore_fn","parameters":"(self, checkpoint_path, saver)","argument_list":"","return_statement":"return _restore_fn","docstring":"Creates a function that restores a model from checkpoint.\n\n Args:\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n saver: Saver for restoring variables from the checkpoint file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n\n Raises:\n ValueError: If checkpoint_path does not refer to a checkpoint file or a\n directory containing a checkpoint file.","docstring_summary":"Creates a function that restores a model from checkpoint.","docstring_tokens":["Creates","a","function","that","restores","a","model","from","checkpoint","."],"function":"def _create_restore_fn(self, checkpoint_path, saver):\n \"\"\"Creates a function that restores a model from checkpoint.\n\n Args:\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n saver: Saver for restoring variables from the checkpoint file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n\n Raises:\n ValueError: If checkpoint_path does not refer 
to a checkpoint file or a\n directory containing a checkpoint file.\n \"\"\"\n if tf.gfile.IsDirectory(checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)\n if not checkpoint_path:\n raise ValueError(\"No checkpoint file found in: %s\" % checkpoint_path)\n\n def _restore_fn(sess):\n tf.logging.info(\"Loading model from checkpoint: %s\", checkpoint_path)\n saver.restore(sess, checkpoint_path)\n tf.logging.info(\"Successfully loaded checkpoint: %s\",\n os.path.basename(checkpoint_path))\n\n return _restore_fn","function_tokens":["def","_create_restore_fn","(","self",",","checkpoint_path",",","saver",")",":","if","tf",".","gfile",".","IsDirectory","(","checkpoint_path",")",":","checkpoint_path","=","tf",".","train",".","latest_checkpoint","(","checkpoint_path",")","if","not","checkpoint_path",":","raise","ValueError","(","\"No checkpoint file found in: %s\"","%","checkpoint_path",")","def","_restore_fn","(","sess",")",":","tf",".","logging",".","info","(","\"Loading model from checkpoint: %s\"",",","checkpoint_path",")","saver",".","restore","(","sess",",","checkpoint_path",")","tf",".","logging",".","info","(","\"Successfully loaded checkpoint: %s\"",",","os",".","path",".","basename","(","checkpoint_path",")",")","return","_restore_fn"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py#L73-L100"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py","language":"python","identifier":"InferenceWrapperBase.build_graph_from_config","parameters":"(self, model_config, checkpoint_path)","argument_list":"","return_statement":"return self._create_restore_fn(checkpoint_path, saver)","docstring":"Builds the inference graph from a configuration object.\n\n Args:\n model_config: Object containing configuration for building the model.\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.","docstring_summary":"Builds the inference graph from a configuration object.","docstring_tokens":["Builds","the","inference","graph","from","a","configuration","object","."],"function":"def build_graph_from_config(self, model_config, checkpoint_path):\n \"\"\"Builds the inference graph from a configuration object.\n\n Args:\n model_config: Object containing configuration for building the model.\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n \"\"\"\n tf.logging.info(\"Building model.\")\n self.build_model(model_config)\n saver = tf.train.Saver()\n\n return self._create_restore_fn(checkpoint_path, saver)","function_tokens":["def","build_graph_from_config","(","self",",","model_config",",","checkpoint_path",")",":","tf",".","logging",".","info","(","\"Building 
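_create_restore_fn first resolves a checkpoint directory to its most recent checkpoint file and fails loudly if none exists. A minimal sketch of that resolution step using current tf.io.gfile / tf.train APIs:

```python
import tensorflow as tf

def resolve_checkpoint(checkpoint_path):
    # Directories are resolved to their most recent checkpoint file.
    if tf.io.gfile.isdir(checkpoint_path):
        resolved = tf.train.latest_checkpoint(checkpoint_path)
        if not resolved:
            raise ValueError(
                "No checkpoint file found in: %s" % checkpoint_path)
        return resolved
    return checkpoint_path
```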
model.\"",")","self",".","build_model","(","model_config",")","saver","=","tf",".","train",".","Saver","(",")","return","self",".","_create_restore_fn","(","checkpoint_path",",","saver",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py#L102-L118"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py","language":"python","identifier":"InferenceWrapperBase.build_graph_from_proto","parameters":"(self, graph_def_file, saver_def_file,\n checkpoint_path)","argument_list":"","return_statement":"return self._create_restore_fn(checkpoint_path, saver)","docstring":"Builds the inference graph from serialized GraphDef and SaverDef protos.\n\n Args:\n graph_def_file: File containing a serialized GraphDef proto.\n saver_def_file: File containing a serialized SaverDef proto.\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.","docstring_summary":"Builds the inference graph from serialized GraphDef and SaverDef protos.","docstring_tokens":["Builds","the","inference","graph","from","serialized","GraphDef","and","SaverDef","protos","."],"function":"def build_graph_from_proto(self, graph_def_file, saver_def_file,\n checkpoint_path):\n \"\"\"Builds the inference graph from serialized GraphDef and SaverDef protos.\n\n Args:\n graph_def_file: File containing a serialized GraphDef proto.\n saver_def_file: File containing a serialized SaverDef proto.\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n \"\"\"\n # Load the Graph.\n tf.logging.info(\"Loading GraphDef from file: %s\", graph_def_file)\n graph_def = tf.GraphDef()\n with tf.gfile.FastGFile(graph_def_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name=\"\")\n\n # Load the Saver.\n tf.logging.info(\"Loading SaverDef from file: %s\", saver_def_file)\n saver_def = tf.train.SaverDef()\n with tf.gfile.FastGFile(saver_def_file, \"rb\") as f:\n saver_def.ParseFromString(f.read())\n saver = tf.train.Saver(saver_def=saver_def)\n\n return self._create_restore_fn(checkpoint_path, saver)","function_tokens":["def","build_graph_from_proto","(","self",",","graph_def_file",",","saver_def_file",",","checkpoint_path",")",":","# Load the Graph.","tf",".","logging",".","info","(","\"Loading GraphDef from file: %s\"",",","graph_def_file",")","graph_def","=","tf",".","GraphDef","(",")","with","tf",".","gfile",".","FastGFile","(","graph_def_file",",","\"rb\"",")","as","f",":","graph_def",".","ParseFromString","(","f",".","read","(",")",")","tf",".","import_graph_def","(","graph_def",",","name","=","\"\"",")","# Load the Saver.","tf",".","logging",".","info","(","\"Loading SaverDef from file: 
%s\"",",","saver_def_file",")","saver_def","=","tf",".","train",".","SaverDef","(",")","with","tf",".","gfile",".","FastGFile","(","saver_def_file",",","\"rb\"",")","as","f",":","saver_def",".","ParseFromString","(","f",".","read","(",")",")","saver","=","tf",".","train",".","Saver","(","saver_def","=","saver_def",")","return","self",".","_create_restore_fn","(","checkpoint_path",",","saver",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py#L120-L148"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py","language":"python","identifier":"InferenceWrapperBase.feed_image","parameters":"(self, sess, encoded_image)","argument_list":"","return_statement":"","docstring":"Feeds an image and returns the initial model state.\n\n See comments at the top of file.\n\n Args:\n sess: TensorFlow Session object.\n encoded_image: An encoded image string.\n\n Returns:\n state: A numpy array of shape [1, state_size].","docstring_summary":"Feeds an image and returns the initial model state.","docstring_tokens":["Feeds","an","image","and","returns","the","initial","model","state","."],"function":"def feed_image(self, sess, encoded_image):\n \"\"\"Feeds an image and returns the initial model state.\n\n See comments at the top of file.\n\n Args:\n sess: TensorFlow Session object.\n encoded_image: An encoded image string.\n\n Returns:\n state: A numpy array of shape [1, state_size].\n \"\"\"\n tf.logging.fatal(\"Please implement feed_image in subclass\")","function_tokens":["def","feed_image","(","self",",","sess",",","encoded_image",")",":","tf",".","logging",".","fatal","(","\"Please implement feed_image in subclass\"",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py#L150-L162"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py","language":"python","identifier":"InferenceWrapperBase.inference_step","parameters":"(self, sess, input_feed, state_feed)","argument_list":"","return_statement":"","docstring":"Runs one step of inference.\n\n Args:\n sess: TensorFlow Session object.\n input_feed: A numpy array of shape [batch_size].\n state_feed: A numpy array of shape [batch_size, state_size].\n\n Returns:\n softmax_output: A numpy array of shape [batch_size, vocab_size].\n new_state: A numpy array of shape [batch_size, state_size].\n metadata: Optional. If not None, a string containing metadata about the\n current inference step (e.g. serialized numpy array containing\n activations from a particular model layer.).","docstring_summary":"Runs one step of inference.","docstring_tokens":["Runs","one","step","of","inference","."],"function":"def inference_step(self, sess, input_feed, state_feed):\n \"\"\"Runs one step of inference.\n\n Args:\n sess: TensorFlow Session object.\n input_feed: A numpy array of shape [batch_size].\n state_feed: A numpy array of shape [batch_size, state_size].\n\n Returns:\n softmax_output: A numpy array of shape [batch_size, vocab_size].\n new_state: A numpy array of shape [batch_size, state_size].\n metadata: Optional. 
If not None, a string containing metadata about the\n current inference step (e.g. serialized numpy array containing\n activations from a particular model layer.).\n \"\"\"\n tf.logging.fatal(\"Please implement inference_step in subclass\")","function_tokens":["def","inference_step","(","self",",","sess",",","input_feed",",","state_feed",")",":","tf",".","logging",".","fatal","(","\"Please implement inference_step in subclass\"",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/inference_wrapper_base.py#L164-L179"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py","language":"python","identifier":"Caption.__init__","parameters":"(self, sentence, state, logprob, score, metadata=None)","argument_list":"","return_statement":"","docstring":"Initializes the Caption.\n\n Args:\n sentence: List of word ids in the caption.\n state: Model state after generating the previous word.\n logprob: Log-probability of the caption.\n score: Score of the caption.\n metadata: Optional metadata associated with the partial sentence. If not\n None, a list of strings with the same length as 'sentence'.","docstring_summary":"Initializes the Caption.","docstring_tokens":["Initializes","the","Caption","."],"function":"def __init__(self, sentence, state, logprob, score, metadata=None):\n \"\"\"Initializes the Caption.\n\n Args:\n sentence: List of word ids in the caption.\n state: Model state after generating the previous word.\n logprob: Log-probability of the caption.\n score: Score of the caption.\n metadata: Optional metadata associated with the partial sentence. 
If not\n None, a list of strings with the same length as 'sentence'.\n \"\"\"\n self.sentence = sentence\n self.state = state\n self.logprob = logprob\n self.score = score\n self.metadata = metadata","function_tokens":["def","__init__","(","self",",","sentence",",","state",",","logprob",",","score",",","metadata","=","None",")",":","self",".","sentence","=","sentence","self",".","state","=","state","self",".","logprob","=","logprob","self",".","score","=","score","self",".","metadata","=","metadata"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py#L31-L46"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py","language":"python","identifier":"Caption.__cmp__","parameters":"(self, other)","argument_list":"","return_statement":"","docstring":"Compares Captions by score.","docstring_summary":"Compares Captions by score.","docstring_tokens":["Compares","Captions","by","score","."],"function":"def __cmp__(self, other):\n \"\"\"Compares Captions by score.\"\"\"\n assert isinstance(other, Caption)\n if self.score == other.score:\n return 0\n elif self.score < other.score:\n return -1\n else:\n return 1","function_tokens":["def","__cmp__","(","self",",","other",")",":","assert","isinstance","(","other",",","Caption",")","if","self",".","score","==","other",".","score",":","return","0","elif","self",".","score","<","other",".","score",":","return","-","1","else",":","return","1"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py#L48-L56"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py","language":"python","identifier":"TopN.push","parameters":"(self, x)","argument_list":"","return_statement":"","docstring":"Pushes a new element.","docstring_summary":"Pushes a new element.","docstring_tokens":["Pushes","a","new","element","."],"function":"def push(self, x):\n \"\"\"Pushes a new element.\"\"\"\n assert self._data is not None\n if len(self._data) < self._n:\n heapq.heappush(self._data, x)\n else:\n heapq.heappushpop(self._data, x)","function_tokens":["def","push","(","self",",","x",")",":","assert","self",".","_data","is","not","None","if","len","(","self",".","_data",")","<","self",".","_n",":","heapq",".","heappush","(","self",".","_data",",","x",")","else",":","heapq",".","heappushpop","(","self",".","_data",",","x",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py#L80-L86"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py","language":"python","identifier":"TopN.extract","parameters":"(self, sort=False)","argument_list":"","return_statement":"return data","docstring":"Extracts all elements from the TopN. 
This is a destructive operation.\n\n The only method that can be called immediately after extract() is reset().\n\n Args:\n sort: Whether to return the elements in descending sorted order.\n\n Returns:\n A list of data; the top n elements provided to the set.","docstring_summary":"Extracts all elements from the TopN. This is a destructive operation.","docstring_tokens":["Extracts","all","elements","from","the","TopN",".","This","is","a","destructive","operation","."],"function":"def extract(self, sort=False):\n \"\"\"Extracts all elements from the TopN. This is a destructive operation.\n\n The only method that can be called immediately after extract() is reset().\n\n Args:\n sort: Whether to return the elements in descending sorted order.\n\n Returns:\n A list of data; the top n elements provided to the set.\n \"\"\"\n assert self._data is not None\n data = self._data\n self._data = None\n if sort:\n data.sort(reverse=True)\n return data","function_tokens":["def","extract","(","self",",","sort","=","False",")",":","assert","self",".","_data","is","not","None","data","=","self",".","_data","self",".","_data","=","None","if","sort",":","data",".","sort","(","reverse","=","True",")","return","data"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py#L88-L104"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py","language":"python","identifier":"TopN.reset","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Returns the TopN to an empty state.","docstring_summary":"Returns the TopN to an empty state.","docstring_tokens":["Returns","the","TopN","to","an","empty","state","."],"function":"def reset(self):\n \"\"\"Returns the TopN to an empty state.\"\"\"\n self._data = []","function_tokens":["def","reset","(","self",")",":","self",".","_data","=","[","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py#L106-L108"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py","language":"python","identifier":"CaptionGenerator.__init__","parameters":"(self,\n model,\n vocab,\n beam_size=3,\n max_caption_length=20,\n length_normalization_factor=0.0)","argument_list":"","return_statement":"","docstring":"Initializes the generator.\n\n Args:\n model: Object encapsulating a trained image-to-text model. Must have\n methods feed_image() and inference_step(). For example, an instance of\n InferenceWrapperBase.\n vocab: A Vocabulary object.\n beam_size: Beam size to use when generating captions.\n max_caption_length: The maximum caption length before stopping the search.\n length_normalization_factor: If != 0, a number x such that captions are\n scored by logprob\/length^x, rather than logprob. This changes the\n relative scores of captions depending on their lengths. 
For example, if\n x > 0 then longer captions will be favored.","docstring_summary":"Initializes the generator.","docstring_tokens":["Initializes","the","generator","."],"function":"def __init__(self,\n model,\n vocab,\n beam_size=3,\n max_caption_length=20,\n length_normalization_factor=0.0):\n \"\"\"Initializes the generator.\n\n Args:\n model: Object encapsulating a trained image-to-text model. Must have\n methods feed_image() and inference_step(). For example, an instance of\n InferenceWrapperBase.\n vocab: A Vocabulary object.\n beam_size: Beam size to use when generating captions.\n max_caption_length: The maximum caption length before stopping the search.\n length_normalization_factor: If != 0, a number x such that captions are\n scored by logprob\/length^x, rather than logprob. This changes the\n relative scores of captions depending on their lengths. For example, if\n x > 0 then longer captions will be favored.\n \"\"\"\n self.vocab = vocab\n self.model = model\n\n self.beam_size = beam_size\n self.max_caption_length = max_caption_length\n self.length_normalization_factor = length_normalization_factor","function_tokens":["def","__init__","(","self",",","model",",","vocab",",","beam_size","=","3",",","max_caption_length","=","20",",","length_normalization_factor","=","0.0",")",":","self",".","vocab","=","vocab","self",".","model","=","model","self",".","beam_size","=","beam_size","self",".","max_caption_length","=","max_caption_length","self",".","length_normalization_factor","=","length_normalization_factor"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py#L114-L139"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py","language":"python","identifier":"CaptionGenerator.beam_search","parameters":"(self, sess, encoded_image)","argument_list":"","return_statement":"return complete_captions.extract(sort=True)","docstring":"Runs beam search caption generation on a single image.\n\n Args:\n sess: TensorFlow Session object.\n encoded_image: An encoded image string.\n\n Returns:\n A list of Caption sorted by descending score.","docstring_summary":"Runs beam search caption generation on a single image.","docstring_tokens":["Runs","beam","search","caption","generation","on","a","single","image","."],"function":"def beam_search(self, sess, encoded_image):\n \"\"\"Runs beam search caption generation on a single image.\n\n Args:\n sess: TensorFlow Session object.\n encoded_image: An encoded image string.\n\n Returns:\n A list of Caption sorted by descending score.\n \"\"\"\n # Feed in the image to get the initial state.\n initial_state = self.model.feed_image(sess, encoded_image)\n\n initial_beam = Caption(\n sentence=[self.vocab.start_id],\n state=initial_state[0],\n logprob=0.0,\n score=0.0,\n metadata=[\"\"])\n partial_captions = TopN(self.beam_size)\n partial_captions.push(initial_beam)\n complete_captions = TopN(self.beam_size)\n\n # Run beam search.\n for _ in range(self.max_caption_length - 1):\n partial_captions_list = partial_captions.extract()\n partial_captions.reset()\n input_feed = np.array([c.sentence[-1] for c in partial_captions_list])\n state_feed = np.array([c.state for c in partial_captions_list])\n\n softmax, new_states, metadata = self.model.inference_step(sess,\n input_feed,\n state_feed)\n\n for i, 
partial_caption in enumerate(partial_captions_list):\n word_probabilities = softmax[i]\n state = new_states[i]\n # For this partial caption, get the beam_size most probable next words.\n words_and_probs = list(enumerate(word_probabilities))\n words_and_probs.sort(key=lambda x: -x[1])\n words_and_probs = words_and_probs[0:self.beam_size]\n # Each next word gives a new partial caption.\n for w, p in words_and_probs:\n if p < 1e-12:\n continue # Avoid log(0).\n sentence = partial_caption.sentence + [w]\n logprob = partial_caption.logprob + math.log(p)\n score = logprob\n if metadata:\n metadata_list = partial_caption.metadata + [metadata[i]]\n else:\n metadata_list = None\n if w == self.vocab.end_id:\n if self.length_normalization_factor > 0:\n score \/= len(sentence)**self.length_normalization_factor\n beam = Caption(sentence, state, logprob, score, metadata_list)\n complete_captions.push(beam)\n else:\n beam = Caption(sentence, state, logprob, score, metadata_list)\n partial_captions.push(beam)\n if partial_captions.size() == 0:\n # We have run out of partial candidates; happens when beam_size = 1.\n break\n\n # If we have no complete captions then fall back to the partial captions.\n # But never output a mixture of complete and partial captions because a\n # partial caption could have a higher score than all the complete captions.\n if not complete_captions.size():\n complete_captions = partial_captions\n\n return complete_captions.extract(sort=True)","function_tokens":["def","beam_search","(","self",",","sess",",","encoded_image",")",":","# Feed in the image to get the initial state.","initial_state","=","self",".","model",".","feed_image","(","sess",",","encoded_image",")","initial_beam","=","Caption","(","sentence","=","[","self",".","vocab",".","start_id","]",",","state","=","initial_state","[","0","]",",","logprob","=","0.0",",","score","=","0.0",",","metadata","=","[","\"\"","]",")","partial_captions","=","TopN","(","self",".","beam_size",")","partial_captions",".","push","(","initial_beam",")","complete_captions","=","TopN","(","self",".","beam_size",")","# Run beam search.","for","_","in","range","(","self",".","max_caption_length","-","1",")",":","partial_captions_list","=","partial_captions",".","extract","(",")","partial_captions",".","reset","(",")","input_feed","=","np",".","array","(","[","c",".","sentence","[","-","1","]","for","c","in","partial_captions_list","]",")","state_feed","=","np",".","array","(","[","c",".","state","for","c","in","partial_captions_list","]",")","softmax",",","new_states",",","metadata","=","self",".","model",".","inference_step","(","sess",",","input_feed",",","state_feed",")","for","i",",","partial_caption","in","enumerate","(","partial_captions_list",")",":","word_probabilities","=","softmax","[","i","]","state","=","new_states","[","i","]","# For this partial caption, get the beam_size most probable next words.","words_and_probs","=","list","(","enumerate","(","word_probabilities",")",")","words_and_probs",".","sort","(","key","=","lambda","x",":","-","x","[","1","]",")","words_and_probs","=","words_and_probs","[","0",":","self",".","beam_size","]","# Each next word gives a new partial caption.","for","w",",","p","in","words_and_probs",":","if","p","<","1e-12",":","continue","# Avoid 
log(0).","sentence","=","partial_caption",".","sentence","+","[","w","]","logprob","=","partial_caption",".","logprob","+","math",".","log","(","p",")","score","=","logprob","if","metadata",":","metadata_list","=","partial_caption",".","metadata","+","[","metadata","[","i","]","]","else",":","metadata_list","=","None","if","w","==","self",".","vocab",".","end_id",":","if","self",".","length_normalization_factor",">","0",":","score","\/=","len","(","sentence",")","**","self",".","length_normalization_factor","beam","=","Caption","(","sentence",",","state",",","logprob",",","score",",","metadata_list",")","complete_captions",".","push","(","beam",")","else",":","beam","=","Caption","(","sentence",",","state",",","logprob",",","score",",","metadata_list",")","partial_captions",".","push","(","beam",")","if","partial_captions",".","size","(",")","==","0",":","# We have run out of partial candidates; happens when beam_size = 1.","break","# If we have no complete captions then fall back to the partial captions.","# But never output a mixture of complete and partial captions because a","# partial caption could have a higher score than all the complete captions.","if","not","complete_captions",".","size","(",")",":","complete_captions","=","partial_captions","return","complete_captions",".","extract","(","sort","=","True",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/caption_baseline\/im2txt\/im2txt\/inference_utils\/caption_generator.py#L141-L211"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py","language":"python","identifier":"enable_parallel","parameters":"(processnum=None)","argument_list":"","return_statement":"","docstring":"Change the module's `cut` and `cut_for_search` functions to the\n parallel version.\n\n Note that this only works using dt, custom Tokenizer\n instances are not supported.","docstring_summary":"Change the module's `cut` and `cut_for_search` functions to the\n parallel version.","docstring_tokens":["Change","the","module","s","cut","and","cut_for_search","functions","to","the","parallel","version","."],"function":"def enable_parallel(processnum=None):\n \"\"\"\n Change the module's `cut` and `cut_for_search` functions to the\n parallel version.\n\n Note that this only works using dt, custom Tokenizer\n instances are not supported.\n \"\"\"\n global pool, dt, cut, cut_for_search\n from multiprocessing import cpu_count\n if os.name == 'nt':\n raise NotImplementedError(\n \"jieba: parallel mode only supports posix system\")\n else:\n from multiprocessing import Pool\n dt.check_initialized()\n if processnum is None:\n processnum = cpu_count()\n pool = Pool(processnum)\n cut = _pcut\n cut_for_search = _pcut_for_search","function_tokens":["def","enable_parallel","(","processnum","=","None",")",":","global","pool",",","dt",",","cut",",","cut_for_search","from","multiprocessing","import","cpu_count","if","os",".","name","==","'nt'",":","raise","NotImplementedError","(","\"jieba: parallel mode only supports posix 
system\"",")","else",":","from","multiprocessing","import","Pool","dt",".","check_initialized","(",")","if","processnum","is","None",":","processnum","=","cpu_count","(",")","pool","=","Pool","(","processnum",")","cut","=","_pcut","cut_for_search","=","_pcut_for_search"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py#L566-L586"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py","language":"python","identifier":"Tokenizer.cut","parameters":"(self, sentence, cut_all=False, HMM=True)","argument_list":"","return_statement":"","docstring":"The main function that segments an entire sentence that contains\n Chinese characters into seperated words.\n\n Parameter:\n - sentence: The str(unicode) to be segmented.\n - cut_all: Model type. True for full pattern, False for accurate pattern.\n - HMM: Whether to use the Hidden Markov Model.","docstring_summary":"The main function that segments an entire sentence that contains\n Chinese characters into seperated words.","docstring_tokens":["The","main","function","that","segments","an","entire","sentence","that","contains","Chinese","characters","into","seperated","words","."],"function":"def cut(self, sentence, cut_all=False, HMM=True):\n '''\n The main function that segments an entire sentence that contains\n Chinese characters into seperated words.\n\n Parameter:\n - sentence: The str(unicode) to be segmented.\n - cut_all: Model type. True for full pattern, False for accurate pattern.\n - HMM: Whether to use the Hidden Markov Model.\n '''\n sentence = strdecode(sentence)\n\n if cut_all:\n re_han = re_han_cut_all\n re_skip = re_skip_cut_all\n else:\n re_han = re_han_default\n re_skip = re_skip_default\n if cut_all:\n cut_block = self.__cut_all\n elif HMM:\n cut_block = self.__cut_DAG\n else:\n cut_block = self.__cut_DAG_NO_HMM\n blocks = re_han.split(sentence)\n for blk in blocks:\n if not blk:\n continue\n if re_han.match(blk):\n for word in cut_block(blk):\n yield word\n else:\n tmp = re_skip.split(blk)\n for x in tmp:\n if re_skip.match(x):\n yield x\n elif not cut_all:\n for xx in x:\n yield xx\n else:\n yield x","function_tokens":["def","cut","(","self",",","sentence",",","cut_all","=","False",",","HMM","=","True",")",":","sentence","=","strdecode","(","sentence",")","if","cut_all",":","re_han","=","re_han_cut_all","re_skip","=","re_skip_cut_all","else",":","re_han","=","re_han_default","re_skip","=","re_skip_default","if","cut_all",":","cut_block","=","self",".","__cut_all","elif","HMM",":","cut_block","=","self",".","__cut_DAG","else",":","cut_block","=","self",".","__cut_DAG_NO_HMM","blocks","=","re_han",".","split","(","sentence",")","for","blk","in","blocks",":","if","not","blk",":","continue","if","re_han",".","match","(","blk",")",":","for","word","in","cut_block","(","blk",")",":","yield","word","else",":","tmp","=","re_skip",".","split","(","blk",")","for","x","in","tmp",":","if","re_skip",".","match","(","x",")",":","yield","x","elif","not","cut_all",":","for","xx","in","x",":","yield","xx","else",":","yield","x"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py#L272-L312"} 
{"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py","language":"python","identifier":"Tokenizer.cut_for_search","parameters":"(self, sentence, HMM=True)","argument_list":"","return_statement":"","docstring":"Finer segmentation for search engines.","docstring_summary":"Finer segmentation for search engines.","docstring_tokens":["Finer","segmentation","for","search","engines","."],"function":"def cut_for_search(self, sentence, HMM=True):\n \"\"\"\n Finer segmentation for search engines.\n \"\"\"\n words = self.cut(sentence, HMM=HMM)\n for w in words:\n if len(w) > 2:\n for i in xrange(len(w) - 1):\n gram2 = w[i:i + 2]\n if self.FREQ.get(gram2):\n yield gram2\n if len(w) > 3:\n for i in xrange(len(w) - 2):\n gram3 = w[i:i + 3]\n if self.FREQ.get(gram3):\n yield gram3\n yield w","function_tokens":["def","cut_for_search","(","self",",","sentence",",","HMM","=","True",")",":","words","=","self",".","cut","(","sentence",",","HMM","=","HMM",")","for","w","in","words",":","if","len","(","w",")",">","2",":","for","i","in","xrange","(","len","(","w",")","-","1",")",":","gram2","=","w","[","i",":","i","+","2","]","if","self",".","FREQ",".","get","(","gram2",")",":","yield","gram2","if","len","(","w",")",">","3",":","for","i","in","xrange","(","len","(","w",")","-","2",")",":","gram3","=","w","[","i",":","i","+","3","]","if","self",".","FREQ",".","get","(","gram3",")",":","yield","gram3","yield","w"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py#L314-L330"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py","language":"python","identifier":"Tokenizer.load_userdict","parameters":"(self, f)","argument_list":"","return_statement":"","docstring":"Load personalized dict to improve detect rate.\n\n Parameter:\n - f : A plain text file contains words and their ocurrences.\n Can be a file-like object, or the path of the dictionary file,\n whose encoding must be utf-8.\n\n Structure of dict file:\n word1 freq1 word_type1\n word2 freq2 word_type2\n ...\n Word type may be ignored","docstring_summary":"Load personalized dict to improve detect rate.","docstring_tokens":["Load","personalized","dict","to","improve","detect","rate","."],"function":"def load_userdict(self, f):\n '''\n Load personalized dict to improve detect rate.\n\n Parameter:\n - f : A plain text file contains words and their ocurrences.\n Can be a file-like object, or the path of the dictionary file,\n whose encoding must be utf-8.\n\n Structure of dict file:\n word1 freq1 word_type1\n word2 freq2 word_type2\n ...\n Word type may be ignored\n '''\n self.check_initialized()\n if isinstance(f, string_types):\n f_name = f\n f = open(f, 'rb')\n else:\n f_name = resolve_filename(f)\n for lineno, ln in enumerate(f, 1):\n line = ln.strip()\n if not isinstance(line, text_type):\n try:\n line = line.decode('utf-8').lstrip('\\ufeff')\n except UnicodeDecodeError:\n raise ValueError('dictionary file %s must be utf-8' % f_name)\n if not line:\n continue\n # match won't be None because there's at least one character\n word, freq, tag = re_userdict.match(line).groups()\n if freq is not None:\n freq = freq.strip()\n if tag is not 
None:\n tag = tag.strip()\n self.add_word(word, freq, tag)","function_tokens":["def","load_userdict","(","self",",","f",")",":","self",".","check_initialized","(",")","if","isinstance","(","f",",","string_types",")",":","f_name","=","f","f","=","open","(","f",",","'rb'",")","else",":","f_name","=","resolve_filename","(","f",")","for","lineno",",","ln","in","enumerate","(","f",",","1",")",":","line","=","ln",".","strip","(",")","if","not","isinstance","(","line",",","text_type",")",":","try",":","line","=","line",".","decode","(","'utf-8'",")",".","lstrip","(","'\\ufeff'",")","except","UnicodeDecodeError",":","raise","ValueError","(","'dictionary file %s must be utf-8'","%","f_name",")","if","not","line",":","continue","# match won't be None because there's at least one character","word",",","freq",",","tag","=","re_userdict",".","match","(","line",")",".","groups","(",")","if","freq","is","not","None",":","freq","=","freq",".","strip","(",")","if","tag","is","not","None",":","tag","=","tag",".","strip","(",")","self",".","add_word","(","word",",","freq",",","tag",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py#L356-L392"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py","language":"python","identifier":"Tokenizer.add_word","parameters":"(self, word, freq=None, tag=None)","argument_list":"","return_statement":"","docstring":"Add a word to dictionary.\n\n freq and tag can be omitted, freq defaults to be a calculated value\n that ensures the word can be cut out.","docstring_summary":"Add a word to dictionary.","docstring_tokens":["Add","a","word","to","dictionary","."],"function":"def add_word(self, word, freq=None, tag=None):\n \"\"\"\n Add a word to dictionary.\n\n freq and tag can be omitted, freq defaults to be a calculated value\n that ensures the word can be cut out.\n \"\"\"\n self.check_initialized()\n word = strdecode(word)\n freq = int(freq) if freq is not None else self.suggest_freq(word, False)\n self.FREQ[word] = freq\n self.total += freq\n if tag:\n self.user_word_tag_tab[word] = tag\n for ch in xrange(len(word)):\n wfrag = word[:ch + 1]\n if wfrag not in self.FREQ:\n self.FREQ[wfrag] = 0\n if freq == 0:\n finalseg.add_force_split(word)","function_tokens":["def","add_word","(","self",",","word",",","freq","=","None",",","tag","=","None",")",":","self",".","check_initialized","(",")","word","=","strdecode","(","word",")","freq","=","int","(","freq",")","if","freq","is","not","None","else","self",".","suggest_freq","(","word",",","False",")","self",".","FREQ","[","word","]","=","freq","self",".","total","+=","freq","if","tag",":","self",".","user_word_tag_tab","[","word","]","=","tag","for","ch","in","xrange","(","len","(","word",")",")",":","wfrag","=","word","[",":","ch","+","1","]","if","wfrag","not","in","self",".","FREQ",":","self",".","FREQ","[","wfrag","]","=","0","if","freq","==","0",":","finalseg",".","add_force_split","(","word",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py#L394-L413"} 
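Following the load_userdict and add_word records above (and del_word/suggest_freq, documented in the records that come next), here is a short dictionary-tuning sketch. It assumes the upstream jieba package's module-level wrappers around the default tokenizer; the example words, the frequency 42, and userdict.txt are made-up illustrations.

# -*- coding: utf-8 -*-
# Sketch: customizing the segmentation dictionary per the records above.
import jieba

# add_word: freq defaults to a suggest_freq()-derived value that guarantees
# the word can be cut out; tag attaches an optional POS label.
jieba.add_word("石墨烯")
jieba.add_word("云计算", freq=42, tag="n")

# del_word(word) is shorthand for add_word(word, 0), which force-splits it.
jieba.del_word("自定义词")

# load_userdict takes a path or a file-like object; one "word [freq] [tag]"
# entry per UTF-8 line, with freq and tag optional.
# jieba.load_userdict("userdict.txt")  # hypothetical dictionary file

# suggest_freq tunes frequencies so a segment stays whole (str argument)
# or is split apart (tuple argument); tune=True applies the change.
jieba.suggest_freq(("中", "将"), tune=True)  # prefer splitting 中/将
jieba.suggest_freq("台中", tune=True)        # keep 台中 as one word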
{"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py","language":"python","identifier":"Tokenizer.del_word","parameters":"(self, word)","argument_list":"","return_statement":"","docstring":"Convenient function for deleting a word.","docstring_summary":"Convenient function for deleting a word.","docstring_tokens":["Convenient","function","for","deleting","a","word","."],"function":"def del_word(self, word):\n \"\"\"\n Convenient function for deleting a word.\n \"\"\"\n self.add_word(word, 0)","function_tokens":["def","del_word","(","self",",","word",")",":","self",".","add_word","(","word",",","0",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py#L415-L419"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py","language":"python","identifier":"Tokenizer.suggest_freq","parameters":"(self, segment, tune=False)","argument_list":"","return_statement":"return freq","docstring":"Suggest word frequency to force the characters in a word to be\n joined or splitted.\n\n Parameter:\n - segment : The segments that the word is expected to be cut into,\n If the word should be treated as a whole, use a str.\n - tune : If True, tune the word frequency.\n\n Note that HMM may affect the final result. If the result doesn't change,\n set HMM=False.","docstring_summary":"Suggest word frequency to force the characters in a word to be\n joined or splitted.","docstring_tokens":["Suggest","word","frequency","to","force","the","characters","in","a","word","to","be","joined","or","splitted","."],"function":"def suggest_freq(self, segment, tune=False):\n \"\"\"\n Suggest word frequency to force the characters in a word to be\n joined or splitted.\n\n Parameter:\n - segment : The segments that the word is expected to be cut into,\n If the word should be treated as a whole, use a str.\n - tune : If True, tune the word frequency.\n\n Note that HMM may affect the final result. 
If the result doesn't change,\n set HMM=False.\n \"\"\"\n self.check_initialized()\n ftotal = float(self.total)\n freq = 1\n if isinstance(segment, string_types):\n word = segment\n for seg in self.cut(word, HMM=False):\n freq *= self.FREQ.get(seg, 1) \/ ftotal\n freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1))\n else:\n segment = tuple(map(strdecode, segment))\n word = ''.join(segment)\n for seg in segment:\n freq *= self.FREQ.get(seg, 1) \/ ftotal\n freq = min(int(freq * self.total), self.FREQ.get(word, 0))\n if tune:\n add_word(word, freq)\n return freq","function_tokens":["def","suggest_freq","(","self",",","segment",",","tune","=","False",")",":","self",".","check_initialized","(",")","ftotal","=","float","(","self",".","total",")","freq","=","1","if","isinstance","(","segment",",","string_types",")",":","word","=","segment","for","seg","in","self",".","cut","(","word",",","HMM","=","False",")",":","freq","*=","self",".","FREQ",".","get","(","seg",",","1",")","\/","ftotal","freq","=","max","(","int","(","freq","*","self",".","total",")","+","1",",","self",".","FREQ",".","get","(","word",",","1",")",")","else",":","segment","=","tuple","(","map","(","strdecode",",","segment",")",")","word","=","''",".","join","(","segment",")","for","seg","in","segment",":","freq","*=","self",".","FREQ",".","get","(","seg",",","1",")","\/","ftotal","freq","=","min","(","int","(","freq","*","self",".","total",")",",","self",".","FREQ",".","get","(","word",",","0",")",")","if","tune",":","add_word","(","word",",","freq",")","return","freq"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py#L421-L450"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py","language":"python","identifier":"Tokenizer.tokenize","parameters":"(self, unicode_sentence, mode=\"default\", HMM=True)","argument_list":"","return_statement":"","docstring":"Tokenize a sentence and yields tuples of (word, start, end)\n\n Parameter:\n - sentence: the str(unicode) to be segmented.\n - mode: \"default\" or \"search\", \"search\" is for finer segmentation.\n - HMM: whether to use the Hidden Markov Model.","docstring_summary":"Tokenize a sentence and yields tuples of (word, start, end)","docstring_tokens":["Tokenize","a","sentence","and","yields","tuples","of","(","word","start","end",")"],"function":"def tokenize(self, unicode_sentence, mode=\"default\", HMM=True):\n \"\"\"\n Tokenize a sentence and yields tuples of (word, start, end)\n\n Parameter:\n - sentence: the str(unicode) to be segmented.\n - mode: \"default\" or \"search\", \"search\" is for finer segmentation.\n - HMM: whether to use the Hidden Markov Model.\n \"\"\"\n if not isinstance(unicode_sentence, text_type):\n raise ValueError(\"jieba: the input parameter should be unicode.\")\n start = 0\n if mode == 'default':\n for w in self.cut(unicode_sentence, HMM=HMM):\n width = len(w)\n yield (w, start, start + width)\n start += width\n else:\n for w in self.cut(unicode_sentence, HMM=HMM):\n width = len(w)\n if len(w) > 2:\n for i in xrange(len(w) - 1):\n gram2 = w[i:i + 2]\n if self.FREQ.get(gram2):\n yield (gram2, start + i, start + i + 2)\n if len(w) > 3:\n for i in xrange(len(w) - 2):\n gram3 = w[i:i + 3]\n if self.FREQ.get(gram3):\n yield (gram3, start + i, start + i + 3)\n 
yield (w, start, start + width)\n start += width","function_tokens":["def","tokenize","(","self",",","unicode_sentence",",","mode","=","\"default\"",",","HMM","=","True",")",":","if","not","isinstance","(","unicode_sentence",",","text_type",")",":","raise","ValueError","(","\"jieba: the input parameter should be unicode.\"",")","start","=","0","if","mode","==","'default'",":","for","w","in","self",".","cut","(","unicode_sentence",",","HMM","=","HMM",")",":","width","=","len","(","w",")","yield","(","w",",","start",",","start","+","width",")","start","+=","width","else",":","for","w","in","self",".","cut","(","unicode_sentence",",","HMM","=","HMM",")",":","width","=","len","(","w",")","if","len","(","w",")",">","2",":","for","i","in","xrange","(","len","(","w",")","-","1",")",":","gram2","=","w","[","i",":","i","+","2","]","if","self",".","FREQ",".","get","(","gram2",")",":","yield","(","gram2",",","start","+","i",",","start","+","i","+","2",")","if","len","(","w",")",">","3",":","for","i","in","xrange","(","len","(","w",")","-","2",")",":","gram3","=","w","[","i",":","i","+","3","]","if","self",".","FREQ",".","get","(","gram3",")",":","yield","(","gram3",",","start","+","i",",","start","+","i","+","3",")","yield","(","w",",","start",",","start","+","width",")","start","+=","width"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/__init__.py#L452-L483"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/analyse\/tfidf.py","language":"python","identifier":"TFIDF.extract_tags","parameters":"(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False)","argument_list":"","return_statement":"","docstring":"Extract keywords from sentence using TF-IDF algorithm.\n Parameter:\n - topK: return how many top keywords. `None` for all possible words.\n - withWeight: if True, return a list of (word, weight);\n if False, return a list of words.\n - allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].\n if the POS of w is not in this list,it will be filtered.\n - withFlag: only work with allowPOS is not empty.\n if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words","docstring_summary":"Extract keywords from sentence using TF-IDF algorithm.\n Parameter:\n - topK: return how many top keywords. `None` for all possible words.\n - withWeight: if True, return a list of (word, weight);\n if False, return a list of words.\n - allowPOS: the allowed POS list eg. 
['ns', 'n', 'vn', 'v','nr'].\n if the POS of w is not in this list,it will be filtered.\n - withFlag: only work with allowPOS is not empty.\n if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words","docstring_tokens":["Extract","keywords","from","sentence","using","TF","-","IDF","algorithm",".","Parameter",":","-","topK",":","return","how","many","top","keywords",".","None","for","all","possible","words",".","-","withWeight",":","if","True","return","a","list","of","(","word","weight",")",";","if","False","return","a","list","of","words",".","-","allowPOS",":","the","allowed","POS","list","eg",".","[","ns","n","vn","v","nr","]",".","if","the","POS","of","w","is","not","in","this","list","it","will","be","filtered",".","-","withFlag",":","only","work","with","allowPOS","is","not","empty",".","if","True","return","a","list","of","pair","(","word","weight",")","like","posseg",".","cut","if","False","return","a","list","of","words"],"function":"def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False):\n \"\"\"\n Extract keywords from sentence using TF-IDF algorithm.\n Parameter:\n - topK: return how many top keywords. `None` for all possible words.\n - withWeight: if True, return a list of (word, weight);\n if False, return a list of words.\n - allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].\n if the POS of w is not in this list,it will be filtered.\n - withFlag: only work with allowPOS is not empty.\n if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words\n \"\"\"\n if allowPOS:\n allowPOS = frozenset(allowPOS)\n words = self.postokenizer.cut(sentence)\n else:\n words = self.tokenizer.cut(sentence)\n freq = {}\n for w in words:\n if allowPOS:\n if w.flag not in allowPOS:\n continue\n elif not withFlag:\n w = w.word\n wc = w.word if allowPOS and withFlag else w\n if len(wc.strip()) < 2 or wc.lower() in self.stop_words:\n continue\n freq[w] = freq.get(w, 0.0) + 1.0\n total = sum(freq.values())\n for k in freq:\n kw = k.word if allowPOS and withFlag else k\n freq[k] *= self.idf_freq.get(kw, self.median_idf) \/ total\n\n if withWeight:\n tags = sorted(freq.items(), key=itemgetter(1), reverse=True)\n else:\n tags = sorted(freq, key=freq.__getitem__, reverse=True)\n if topK:\n return tags[:topK]\n else:\n return 
tags","function_tokens":["def","extract_tags","(","self",",","sentence",",","topK","=","20",",","withWeight","=","False",",","allowPOS","=","(",")",",","withFlag","=","False",")",":","if","allowPOS",":","allowPOS","=","frozenset","(","allowPOS",")","words","=","self",".","postokenizer",".","cut","(","sentence",")","else",":","words","=","self",".","tokenizer",".","cut","(","sentence",")","freq","=","{","}","for","w","in","words",":","if","allowPOS",":","if","w",".","flag","not","in","allowPOS",":","continue","elif","not","withFlag",":","w","=","w",".","word","wc","=","w",".","word","if","allowPOS","and","withFlag","else","w","if","len","(","wc",".","strip","(",")",")","<","2","or","wc",".","lower","(",")","in","self",".","stop_words",":","continue","freq","[","w","]","=","freq",".","get","(","w",",","0.0",")","+","1.0","total","=","sum","(","freq",".","values","(",")",")","for","k","in","freq",":","kw","=","k",".","word","if","allowPOS","and","withFlag","else","k","freq","[","k","]","*=","self",".","idf_freq",".","get","(","kw",",","self",".","median_idf",")","\/","total","if","withWeight",":","tags","=","sorted","(","freq",".","items","(",")",",","key","=","itemgetter","(","1",")",",","reverse","=","True",")","else",":","tags","=","sorted","(","freq",",","key","=","freq",".","__getitem__",",","reverse","=","True",")","if","topK",":","return","tags","[",":","topK","]","else",":","return","tags"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/analyse\/tfidf.py#L75-L116"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/analyse\/textrank.py","language":"python","identifier":"TextRank.textrank","parameters":"(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False)","argument_list":"","return_statement":"","docstring":"Extract keywords from sentence using TextRank algorithm.\n Parameter:\n - topK: return how many top keywords. `None` for all possible words.\n - withWeight: if True, return a list of (word, weight);\n if False, return a list of words.\n - allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].\n if the POS of w is not in this list, it will be filtered.\n - withFlag: if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words","docstring_summary":"Extract keywords from sentence using TextRank algorithm.\n Parameter:\n - topK: return how many top keywords. `None` for all possible words.\n - withWeight: if True, return a list of (word, weight);\n if False, return a list of words.\n - allowPOS: the allowed POS list eg. 
['ns', 'n', 'vn', 'v'].\n if the POS of w is not in this list, it will be filtered.\n - withFlag: if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words","docstring_tokens":["Extract","keywords","from","sentence","using","TextRank","algorithm",".","Parameter",":","-","topK",":","return","how","many","top","keywords",".","None","for","all","possible","words",".","-","withWeight",":","if","True","return","a","list","of","(","word","weight",")",";","if","False","return","a","list","of","words",".","-","allowPOS",":","the","allowed","POS","list","eg",".","[","ns","n","vn","v","]",".","if","the","POS","of","w","is","not","in","this","list","it","will","be","filtered",".","-","withFlag",":","if","True","return","a","list","of","pair","(","word","weight",")","like","posseg",".","cut","if","False","return","a","list","of","words"],"function":"def textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'), withFlag=False):\n \"\"\"\n Extract keywords from sentence using TextRank algorithm.\n Parameter:\n - topK: return how many top keywords. `None` for all possible words.\n - withWeight: if True, return a list of (word, weight);\n if False, return a list of words.\n - allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].\n if the POS of w is not in this list, it will be filtered.\n - withFlag: if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words\n \"\"\"\n self.pos_filt = frozenset(allowPOS)\n g = UndirectWeightedGraph()\n cm = defaultdict(int)\n words = tuple(self.tokenizer.cut(sentence))\n for i, wp in enumerate(words):\n if self.pairfilter(wp):\n for j in xrange(i + 1, i + self.span):\n if j >= len(words):\n break\n if not self.pairfilter(words[j]):\n continue\n if allowPOS and withFlag:\n cm[(wp, words[j])] += 1\n else:\n cm[(wp.word, words[j].word)] += 1\n\n for terms, w in cm.items():\n g.addEdge(terms[0], terms[1], w)\n nodes_rank = g.rank()\n if withWeight:\n tags = sorted(nodes_rank.items(), key=itemgetter(1), reverse=True)\n else:\n tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True)\n\n if topK:\n return tags[:topK]\n else:\n return 
tags","function_tokens":["def","textrank","(","self",",","sentence",",","topK","=","20",",","withWeight","=","False",",","allowPOS","=","(","'ns'",",","'n'",",","'vn'",",","'v'",")",",","withFlag","=","False",")",":","self",".","pos_filt","=","frozenset","(","allowPOS",")","g","=","UndirectWeightedGraph","(",")","cm","=","defaultdict","(","int",")","words","=","tuple","(","self",".","tokenizer",".","cut","(","sentence",")",")","for","i",",","wp","in","enumerate","(","words",")",":","if","self",".","pairfilter","(","wp",")",":","for","j","in","xrange","(","i","+","1",",","i","+","self",".","span",")",":","if","j",">=","len","(","words",")",":","break","if","not","self",".","pairfilter","(","words","[","j","]",")",":","continue","if","allowPOS","and","withFlag",":","cm","[","(","wp",",","words","[","j","]",")","]","+=","1","else",":","cm","[","(","wp",".","word",",","words","[","j","]",".","word",")","]","+=","1","for","terms",",","w","in","cm",".","items","(",")",":","g",".","addEdge","(","terms","[","0","]",",","terms","[","1","]",",","w",")","nodes_rank","=","g",".","rank","(",")","if","withWeight",":","tags","=","sorted","(","nodes_rank",".","items","(",")",",","key","=","itemgetter","(","1",")",",","reverse","=","True",")","else",":","tags","=","sorted","(","nodes_rank",",","key","=","nodes_rank",".","__getitem__",",","reverse","=","True",")","if","topK",":","return","tags","[",":","topK","]","else",":","return","tags"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/analyse\/textrank.py#L69-L108"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/posseg\/__init__.py","language":"python","identifier":"cut","parameters":"(sentence, HMM=True)","argument_list":"","return_statement":"","docstring":"Global `cut` function that supports parallel processing.\n\n Note that this only works using dt, custom POSTokenizer\n instances are not supported.","docstring_summary":"Global `cut` function that supports parallel processing.","docstring_tokens":["Global","cut","function","that","supports","parallel","processing","."],"function":"def cut(sentence, HMM=True):\n \"\"\"\n Global `cut` function that supports parallel processing.\n\n Note that this only works using dt, custom POSTokenizer\n instances are not supported.\n \"\"\"\n global dt\n if jieba.pool is None:\n for w in dt.cut(sentence, HMM=HMM):\n yield w\n else:\n parts = strdecode(sentence).splitlines(True)\n if HMM:\n result = jieba.pool.map(_lcut_internal, parts)\n else:\n result = jieba.pool.map(_lcut_internal_no_hmm, parts)\n for r in result:\n for w in r:\n yield 
w","function_tokens":["def","cut","(","sentence",",","HMM","=","True",")",":","global","dt","if","jieba",".","pool","is","None",":","for","w","in","dt",".","cut","(","sentence",",","HMM","=","HMM",")",":","yield","w","else",":","parts","=","strdecode","(","sentence",")",".","splitlines","(","True",")","if","HMM",":","result","=","jieba",".","pool",".","map","(","_lcut_internal",",","parts",")","else",":","result","=","jieba",".","pool",".","map","(","_lcut_internal_no_hmm",",","parts",")","for","r","in","result",":","for","w","in","r",":","yield","w"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Baselines\/translation_and_interpretation_baseline\/train\/prepare_data\/jieba\/posseg\/__init__.py#L272-L291"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/keypoint_eval\/keypoint_eval.py","language":"python","identifier":"load_annotations","parameters":"(anno_file, return_dict)","argument_list":"","return_statement":"return annotations","docstring":"Convert annotation JSON file.","docstring_summary":"Convert annotation JSON file.","docstring_tokens":["Convert","annotation","JSON","file","."],"function":"def load_annotations(anno_file, return_dict):\n \"\"\"Convert annotation JSON file.\"\"\"\n\n annotations = dict()\n annotations['image_ids'] = set([])\n annotations['annos'] = dict()\n annotations['delta'] = 2*np.array([0.01388152, 0.01515228, 0.01057665, 0.01417709, \\\n 0.01497891, 0.01402144, 0.03909642, 0.03686941, 0.01981803, \\\n 0.03843971, 0.03412318, 0.02415081, 0.01291456, 0.01236173])\n try:\n annos = json.load(open(anno_file, 'r'))\n except Exception:\n return_dict['error'] = 'Annotation file does not exist or is an invalid JSON file.'\n exit(return_dict['error'])\n\n for anno in annos:\n annotations['image_ids'].add(anno['image_id'])\n annotations['annos'][anno['image_id']] = dict()\n annotations['annos'][anno['image_id']]['human_annos'] = anno['human_annotations']\n annotations['annos'][anno['image_id']]['keypoint_annos'] = anno['keypoint_annotations']\n\n return annotations","function_tokens":["def","load_annotations","(","anno_file",",","return_dict",")",":","annotations","=","dict","(",")","annotations","[","'image_ids'","]","=","set","(","[","]",")","annotations","[","'annos'","]","=","dict","(",")","annotations","[","'delta'","]","=","2","*","np",".","array","(","[","0.01388152",",","0.01515228",",","0.01057665",",","0.01417709",",","0.01497891",",","0.01402144",",","0.03909642",",","0.03686941",",","0.01981803",",","0.03843971",",","0.03412318",",","0.02415081",",","0.01291456",",","0.01236173","]",")","try",":","annos","=","json",".","load","(","open","(","anno_file",",","'r'",")",")","except","Exception",":","return_dict","[","'error'","]","=","'Annotation file does not exist or is an invalid JSON 
file.'","exit","(","return_dict","[","'error'","]",")","for","anno","in","annos",":","annotations","[","'image_ids'","]",".","add","(","anno","[","'image_id'","]",")","annotations","[","'annos'","]","[","anno","[","'image_id'","]","]","=","dict","(",")","annotations","[","'annos'","]","[","anno","[","'image_id'","]","]","[","'human_annos'","]","=","anno","[","'human_annotations'","]","annotations","[","'annos'","]","[","anno","[","'image_id'","]","]","[","'keypoint_annos'","]","=","anno","[","'keypoint_annotations'","]","return","annotations"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/keypoint_eval\/keypoint_eval.py#L44-L65"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/keypoint_eval\/keypoint_eval.py","language":"python","identifier":"load_predictions","parameters":"(prediction_file, return_dict)","argument_list":"","return_statement":"return predictions","docstring":"Convert prediction JSON file.","docstring_summary":"Convert prediction JSON file.","docstring_tokens":["Convert","prediction","JSON","file","."],"function":"def load_predictions(prediction_file, return_dict):\n \"\"\"Convert prediction JSON file.\"\"\"\n\n predictions = dict()\n predictions['image_ids'] = []\n predictions['annos'] = dict()\n id_set = set([])\n\n try:\n preds = json.load(open(prediction_file, 'r'))\n except Exception:\n return_dict['error'] = 'Prediction file does not exist or is an invalid JSON file.'\n exit(return_dict['error'])\n\n for pred in preds:\n if 'image_id' not in pred.keys():\n return_dict['warning'].append('There is an invalid annotation info, \\\n likely missing key \\'image_id\\'.')\n continue\n if 'keypoint_annotations' not in pred.keys():\n return_dict['warning'].append(pred['image_id']+\\\n ' does not have key \\'keypoint_annotations\\'.')\n continue\n image_id = pred['image_id'].split('.')[0]\n if image_id in id_set:\n return_dict['warning'].append(pred['image_id']+\\\n ' is duplicated in prediction JSON file.')\n else:\n id_set.add(image_id)\n predictions['image_ids'].append(image_id)\n predictions['annos'][pred['image_id']] = dict()\n predictions['annos'][pred['image_id']]['keypoint_annos'] = pred['keypoint_annotations']\n\n return predictions","function_tokens":["def","load_predictions","(","prediction_file",",","return_dict",")",":","predictions","=","dict","(",")","predictions","[","'image_ids'","]","=","[","]","predictions","[","'annos'","]","=","dict","(",")","id_set","=","set","(","[","]",")","try",":","preds","=","json",".","load","(","open","(","prediction_file",",","'r'",")",")","except","Exception",":","return_dict","[","'error'","]","=","'Prediction file does not exist or is an invalid JSON file.'","exit","(","return_dict","[","'error'","]",")","for","pred","in","preds",":","if","'image_id'","not","in","pred",".","keys","(",")",":","return_dict","[","'warning'","]",".","append","(","'There is an invalid annotation info, \\\n likely missing key \\'image_id\\'.'",")","continue","if","'keypoint_annotations'","not","in","pred",".","keys","(",")",":","return_dict","[","'warning'","]",".","append","(","pred","[","'image_id'","]","+","' does not have key \\'keypoint_annotations\\'.'",")","continue","image_id","=","pred","[","'image_id'","]",".","split","(","'.'",")","[","0","]","if","image_id","in","id_set",":","return_dict","[","'warning'","]",".","append","(","pred","[","'image_id'","]","+","' is duplicated in prediction JSON 
file.'",")","else",":","id_set",".","add","(","image_id",")","predictions","[","'image_ids'","]",".","append","(","image_id",")","predictions","[","'annos'","]","[","pred","[","'image_id'","]","]","=","dict","(",")","predictions","[","'annos'","]","[","pred","[","'image_id'","]","]","[","'keypoint_annos'","]","=","pred","[","'keypoint_annotations'","]","return","predictions"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/keypoint_eval\/keypoint_eval.py#L68-L101"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/keypoint_eval\/keypoint_eval.py","language":"python","identifier":"compute_oks","parameters":"(anno, predict, delta)","argument_list":"","return_statement":"return oks","docstring":"Compute oks matrix (size gtN*pN).","docstring_summary":"Compute oks matrix (size gtN*pN).","docstring_tokens":["Compute","oks","matrix","(","size","gtN","*","pN",")","."],"function":"def compute_oks(anno, predict, delta):\n \"\"\"Compute oks matrix (size gtN*pN).\"\"\"\n\n anno_count = len(anno['keypoint_annos'].keys())\n predict_count = len(predict.keys())\n oks = np.zeros((anno_count, predict_count))\n if predict_count == 0:\n return oks.T\n\n # for every human keypoint annotation\n for i in range(anno_count):\n anno_key = anno['keypoint_annos'].keys()[i]\n anno_keypoints = np.reshape(anno['keypoint_annos'][anno_key], (14, 3))\n visible = anno_keypoints[:, 2] == 1\n bbox = anno['human_annos'][anno_key]\n scale = np.float32((bbox[3]-bbox[1])*(bbox[2]-bbox[0]))\n if np.sum(visible) == 0:\n for j in range(predict_count):\n oks[i, j] = 0\n else:\n # for every predicted human\n for j in range(predict_count):\n predict_key = predict.keys()[j]\n predict_keypoints = np.reshape(predict[predict_key], (14, 3))\n dis = np.sum((anno_keypoints[visible, :2] \\\n - predict_keypoints[visible, :2])**2, axis=1)\n oks[i, j] = np.mean(np.exp(-dis\/2\/delta[visible]**2\/(scale+1)))\n return oks","function_tokens":["def","compute_oks","(","anno",",","predict",",","delta",")",":","anno_count","=","len","(","anno","[","'keypoint_annos'","]",".","keys","(",")",")","predict_count","=","len","(","predict",".","keys","(",")",")","oks","=","np",".","zeros","(","(","anno_count",",","predict_count",")",")","if","predict_count","==","0",":","return","oks",".","T","# for every human keypoint annotation","for","i","in","range","(","anno_count",")",":","anno_key","=","anno","[","'keypoint_annos'","]",".","keys","(",")","[","i","]","anno_keypoints","=","np",".","reshape","(","anno","[","'keypoint_annos'","]","[","anno_key","]",",","(","14",",","3",")",")","visible","=","anno_keypoints","[",":",",","2","]","==","1","bbox","=","anno","[","'human_annos'","]","[","anno_key","]","scale","=","np",".","float32","(","(","bbox","[","3","]","-","bbox","[","1","]",")","*","(","bbox","[","2","]","-","bbox","[","0","]",")",")","if","np",".","sum","(","visible",")","==","0",":","for","j","in","range","(","predict_count",")",":","oks","[","i",",","j","]","=","0","else",":","# for every predicted 
human","for","j","in","range","(","predict_count",")",":","predict_key","=","predict",".","keys","(",")","[","j","]","predict_keypoints","=","np",".","reshape","(","predict","[","predict_key","]",",","(","14",",","3",")",")","dis","=","np",".","sum","(","(","anno_keypoints","[","visible",",",":","2","]","-","predict_keypoints","[","visible",",",":","2","]",")","**","2",",","axis","=","1",")","oks","[","i",",","j","]","=","np",".","mean","(","np",".","exp","(","-","dis","\/","2","\/","delta","[","visible","]","**","2","\/","(","scale","+","1",")",")",")","return","oks"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/keypoint_eval\/keypoint_eval.py#L104-L131"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/keypoint_eval\/keypoint_eval.py","language":"python","identifier":"keypoint_eval","parameters":"(predictions, annotations, return_dict)","argument_list":"","return_statement":"return return_dict","docstring":"Evaluate predicted_file and return mAP.","docstring_summary":"Evaluate predicted_file and return mAP.","docstring_tokens":["Evaluate","predicted_file","and","return","mAP","."],"function":"def keypoint_eval(predictions, annotations, return_dict):\n \"\"\"Evaluate predicted_file and return mAP.\"\"\"\n\n oks_all = np.zeros((0))\n oks_num = 0\n \n # Construct set to speed up id searching.\n prediction_id_set = set(predictions['image_ids'])\n\n # for every annotation in our test\/validation set\n for image_id in annotations['image_ids']:\n # if the image in the predictions, then compute oks\n if image_id in prediction_id_set:\n oks = compute_oks(anno=annotations['annos'][image_id], \\\n predict=predictions['annos'][image_id]['keypoint_annos'], \\\n delta=annotations['delta'])\n # view pairs with max OKSs as match ones, add to oks_all\n oks_all = np.concatenate((oks_all, np.max(oks, axis=1)), axis=0)\n # accumulate total num by max(gtN,pN)\n oks_num += np.max(oks.shape)\n else:\n # otherwise report warning\n return_dict['warning'].append(image_id+' is not in the prediction JSON file.')\n # number of humen in ground truth annotations\n gt_n = len(annotations['annos'][image_id]['human_annos'].keys())\n # fill 0 in oks scores\n oks_all = np.concatenate((oks_all, np.zeros((gt_n))), axis=0)\n # accumulate total num by ground truth number\n oks_num += gt_n\n\n # compute mAP by APs under different oks thresholds\n average_precision = []\n for threshold in np.linspace(0.5, 0.95, 10):\n average_precision.append(np.sum(oks_all > threshold)\/np.float32(oks_num))\n return_dict['score'] = np.mean(average_precision)\n\n return return_dict","function_tokens":["def","keypoint_eval","(","predictions",",","annotations",",","return_dict",")",":","oks_all","=","np",".","zeros","(","(","0",")",")","oks_num","=","0","# Construct set to speed up id searching.","prediction_id_set","=","set","(","predictions","[","'image_ids'","]",")","# for every annotation in our test\/validation set","for","image_id","in","annotations","[","'image_ids'","]",":","# if the image in the predictions, then compute oks","if","image_id","in","prediction_id_set",":","oks","=","compute_oks","(","anno","=","annotations","[","'annos'","]","[","image_id","]",",","predict","=","predictions","[","'annos'","]","[","image_id","]","[","'keypoint_annos'","]",",","delta","=","annotations","[","'delta'","]",")","# view pairs with max OKSs as match ones, add to 
oks_all","oks_all","=","np",".","concatenate","(","(","oks_all",",","np",".","max","(","oks",",","axis","=","1",")",")",",","axis","=","0",")","# accumulate total num by max(gtN,pN)","oks_num","+=","np",".","max","(","oks",".","shape",")","else",":","# otherwise report warning","return_dict","[","'warning'","]",".","append","(","image_id","+","' is not in the prediction JSON file.'",")","# number of humen in ground truth annotations","gt_n","=","len","(","annotations","[","'annos'","]","[","image_id","]","[","'human_annos'","]",".","keys","(",")",")","# fill 0 in oks scores","oks_all","=","np",".","concatenate","(","(","oks_all",",","np",".","zeros","(","(","gt_n",")",")",")",",","axis","=","0",")","# accumulate total num by ground truth number","oks_num","+=","gt_n","# compute mAP by APs under different oks thresholds","average_precision","=","[","]","for","threshold","in","np",".","linspace","(","0.5",",","0.95",",","10",")",":","average_precision",".","append","(","np",".","sum","(","oks_all",">","threshold",")","\/","np",".","float32","(","oks_num",")",")","return_dict","[","'score'","]","=","np",".","mean","(","average_precision",")","return","return_dict"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/keypoint_eval\/keypoint_eval.py#L134-L170"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/keypoint_eval\/keypoint_eval.py","language":"python","identifier":"main","parameters":"()","argument_list":"","return_statement":"","docstring":"The evaluator.","docstring_summary":"The evaluator.","docstring_tokens":["The","evaluator","."],"function":"def main():\n \"\"\"The evaluator.\"\"\"\n\n # Arguments parser\n parser = argparse.ArgumentParser()\n parser.add_argument('--submit', help='prediction json file', type=str,\n default='keypoint_predictions_example.json')\n parser.add_argument('--ref', help='annotation json file', type=str,\n default='keypoint_annotations_example.json')\n args = parser.parse_args()\n\n # Initialize return_dict\n return_dict = dict()\n return_dict['error'] = None\n return_dict['warning'] = []\n return_dict['score'] = None\n\n # Load annotation JSON file\n start_time = time.time()\n annotations = load_annotations(anno_file=args.ref,\n return_dict=return_dict)\n print 'Complete reading annotation JSON file in %.2f seconds.' %(time.time() - start_time)\n\n # Load prediction JSON file\n start_time = time.time()\n predictions = load_predictions(prediction_file=args.submit,\n return_dict=return_dict)\n print 'Complete reading prediction JSON file in %.2f seconds.' %(time.time() - start_time)\n\n # Keypoint evaluation\n start_time = time.time()\n return_dict = keypoint_eval(predictions=predictions,\n annotations=annotations,\n return_dict=return_dict)\n print 'Complete evaluation in %.2f seconds.' 
%(time.time() - start_time)\n\n # Print return_dict and final score\n pprint.pprint(return_dict)\n print 'Score: ', '%.8f' % return_dict['score']","function_tokens":["def","main","(",")",":","# Arguments parser","parser","=","argparse",".","ArgumentParser","(",")","parser",".","add_argument","(","'--submit'",",","help","=","'prediction json file'",",","type","=","str",",","default","=","'keypoint_predictions_example.json'",")","parser",".","add_argument","(","'--ref'",",","help","=","'annotation json file'",",","type","=","str",",","default","=","'keypoint_annotations_example.json'",")","args","=","parser",".","parse_args","(",")","# Initialize return_dict","return_dict","=","dict","(",")","return_dict","[","'error'","]","=","None","return_dict","[","'warning'","]","=","[","]","return_dict","[","'score'","]","=","None","# Load annotation JSON file","start_time","=","time",".","time","(",")","annotations","=","load_annotations","(","anno_file","=","args",".","ref",",","return_dict","=","return_dict",")","print","'Complete reading annotation JSON file in %.2f seconds.'","%","(","time",".","time","(",")","-","start_time",")","# Load prediction JSON file","start_time","=","time",".","time","(",")","predictions","=","load_predictions","(","prediction_file","=","args",".","submit",",","return_dict","=","return_dict",")","print","'Complete reading prediction JSON file in %.2f seconds.'","%","(","time",".","time","(",")","-","start_time",")","# Keypoint evaluation","start_time","=","time",".","time","(",")","return_dict","=","keypoint_eval","(","predictions","=","predictions",",","annotations","=","annotations",",","return_dict","=","return_dict",")","print","'Complete evaluation in %.2f seconds.'","%","(","time",".","time","(",")","-","start_time",")","# Print return_dict and final score","pprint",".","pprint","(","return_dict",")","print","'Score: '",",","'%.8f'","%","return_dict","[","'score'","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/keypoint_eval\/keypoint_eval.py#L173-L211"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/run_evaluations.py","language":"python","identifier":"compute_m1","parameters":"(json_predictions_file, reference_file)","argument_list":"","return_statement":"return m1_score","docstring":"Compute m1_score","docstring_summary":"Compute m1_score","docstring_tokens":["Compute","m1_score"],"function":"def compute_m1(json_predictions_file, reference_file):\n \"\"\"Compute m1_score\"\"\"\n m1_score = {}\n m1_score['error'] = 0\n try:\n coco = COCO(reference_file)\n coco_res = coco.loadRes(json_predictions_file)\n\n # create coco_eval object.\n coco_eval = COCOEvalCap(coco, coco_res)\n\n # evaluate results\n coco_eval.evaluate()\n except Exception:\n m1_score['error'] = 1\n else:\n # print output evaluation scores\n for metric, score in coco_eval.eval.items():\n print '%s: %.3f'%(metric, score)\n m1_score[metric] = score\n return m1_score","function_tokens":["def","compute_m1","(","json_predictions_file",",","reference_file",")",":","m1_score","=","{","}","m1_score","[","'error'","]","=","0","try",":","coco","=","COCO","(","reference_file",")","coco_res","=","coco",".","loadRes","(","json_predictions_file",")","# create coco_eval object.","coco_eval","=","COCOEvalCap","(","coco",",","coco_res",")","# evaluate 
results","coco_eval",".","evaluate","(",")","except","Exception",":","m1_score","[","'error'","]","=","1","else",":","# print output evaluation scores","for","metric",",","score","in","coco_eval",".","eval",".","items","(",")",":","print","'%s: %.3f'","%","(","metric",",","score",")","m1_score","[","metric","]","=","score","return","m1_score"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/run_evaluations.py#L29-L49"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/run_evaluations.py","language":"python","identifier":"main","parameters":"()","argument_list":"","return_statement":"","docstring":"The evaluator.","docstring_summary":"The evaluator.","docstring_tokens":["The","evaluator","."],"function":"def main():\n \"\"\"The evaluator.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-submit\", \"--submit\", type=str, required=True,\n help=' JSON containing submit sentences.')\n parser.add_argument(\"-ref\", \"--ref\", type=str,\n help=' JSON references.')\n args = parser.parse_args()\n\n json_predictions_file = args.submit\n reference_file = args.ref\n print compute_m1(json_predictions_file, reference_file)","function_tokens":["def","main","(",")",":","parser","=","argparse",".","ArgumentParser","(",")","parser",".","add_argument","(","\"-submit\"",",","\"--submit\"",",","type","=","str",",","required","=","True",",","help","=","' JSON containing submit sentences.'",")","parser",".","add_argument","(","\"-ref\"",",","\"--ref\"",",","type","=","str",",","help","=","' JSON references.'",")","args","=","parser",".","parse_args","(",")","json_predictions_file","=","args",".","submit","reference_file","=","args",".","ref","print","compute_m1","(","json_predictions_file",",","reference_file",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/run_evaluations.py#L52-L63"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py","language":"python","identifier":"precook","parameters":"(s, n=4, out=False)","argument_list":"","return_statement":"return counts","docstring":"Takes a string as input and returns an object that can be given to\n either cook_refs or cook_test. This is optional: cook_refs and cook_test\n can take string arguments as well.\n :param s: string : sentence to be converted into ngrams\n :param n: int : number of ngrams for which representation is calculated\n :return: term frequency vector for occuring ngrams","docstring_summary":"Takes a string as input and returns an object that can be given to\n either cook_refs or cook_test. 
This is optional: cook_refs and cook_test\n can take string arguments as well.\n :param s: string : sentence to be converted into ngrams\n :param n: int : number of ngrams for which representation is calculated\n :return: term frequency vector for occuring ngrams","docstring_tokens":["Takes","a","string","as","input","and","returns","an","object","that","can","be","given","to","either","cook_refs","or","cook_test",".","This","is","optional",":","cook_refs","and","cook_test","can","take","string","arguments","as","well",".",":","param","s",":","string",":","sentence","to","be","converted","into","ngrams",":","param","n",":","int",":","number","of","ngrams","for","which","representation","is","calculated",":","return",":","term","frequency","vector","for","occuring","ngrams"],"function":"def precook(s, n=4, out=False):\n \"\"\"\n Takes a string as input and returns an object that can be given to\n either cook_refs or cook_test. This is optional: cook_refs and cook_test\n can take string arguments as well.\n :param s: string : sentence to be converted into ngrams\n :param n: int : number of ngrams for which representation is calculated\n :return: term frequency vector for occuring ngrams\n \"\"\"\n words = s.split()\n counts = defaultdict(int)\n for k in xrange(1,n+1):\n for i in xrange(len(words)-k+1):\n ngram = tuple(words[i:i+k])\n counts[ngram] += 1\n return counts","function_tokens":["def","precook","(","s",",","n","=","4",",","out","=","False",")",":","words","=","s",".","split","(",")","counts","=","defaultdict","(","int",")","for","k","in","xrange","(","1",",","n","+","1",")",":","for","i","in","xrange","(","len","(","words",")","-","k","+","1",")",":","ngram","=","tuple","(","words","[","i",":","i","+","k","]",")","counts","[","ngram","]","+=","1","return","counts"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py#L11-L26"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py","language":"python","identifier":"cook_refs","parameters":"(refs, n=4)","argument_list":"","return_statement":"return [precook(ref, n) for ref in refs]","docstring":"Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.\n :param refs: list of string : reference sentences for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (list of dict)","docstring_summary":"Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.\n :param refs: list of string : reference sentences for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (list of dict)","docstring_tokens":["Takes","a","list","of","reference","sentences","for","a","single","segment","and","returns","an","object","that","encapsulates","everything","that","BLEU","needs","to","know","about","them",".",":","param","refs",":","list","of","string",":","reference","sentences","for","some","image",":","param","n",":","int",":","number","of","ngrams","for","which","(","ngram",")","representation","is","calculated",":","return",":","result","(","list","of","dict",")"],"function":"def cook_refs(refs, n=4): ## lhuang: 
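
precook is a plain n-gram counter: every contiguous window of 1..n words becomes a dictionary key whose value is its term frequency. The same idea as a self-contained Python 3 snippet (range standing in for the corpus's Python 2 xrange):

from collections import defaultdict

def ngram_counts(sentence, n=4):
    words = sentence.split()
    counts = defaultdict(int)
    for k in range(1, n + 1):                 # n-gram orders 1..n
        for i in range(len(words) - k + 1):   # every window of length k
            counts[tuple(words[i:i + k])] += 1
    return counts

print(ngram_counts("the cat sat on the mat")[("the",)])   # -> 2
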
oracle will call with \"average\"\n '''Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.\n :param refs: list of string : reference sentences for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (list of dict)\n '''\n return [precook(ref, n) for ref in refs]","function_tokens":["def","cook_refs","(","refs",",","n","=","4",")",":","## lhuang: oracle will call with \"average\"","return","[","precook","(","ref",",","n",")","for","ref","in","refs","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py#L28-L36"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py","language":"python","identifier":"cook_test","parameters":"(test, n=4)","argument_list":"","return_statement":"return precook(test, n, True)","docstring":"Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.\n :param test: list of string : hypothesis sentence for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (dict)","docstring_summary":"Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.\n :param test: list of string : hypothesis sentence for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (dict)","docstring_tokens":["Takes","a","test","sentence","and","returns","an","object","that","encapsulates","everything","that","BLEU","needs","to","know","about","it",".",":","param","test",":","list","of","string",":","hypothesis","sentence","for","some","image",":","param","n",":","int",":","number","of","ngrams","for","which","(","ngram",")","representation","is","calculated",":","return",":","result","(","dict",")"],"function":"def cook_test(test, n=4):\n '''Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.\n :param test: list of string : hypothesis sentence for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (dict)\n '''\n return precook(test, n, True)","function_tokens":["def","cook_test","(","test",",","n","=","4",")",":","return","precook","(","test",",","n",",","True",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py#L38-L45"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py","language":"python","identifier":"CiderScorer.copy","parameters":"(self)","argument_list":"","return_statement":"return new","docstring":"copy the refs.","docstring_summary":"copy the refs.","docstring_tokens":["copy","the","refs","."],"function":"def copy(self):\n ''' copy the refs.'''\n new = CiderScorer(n=self.n)\n new.ctest = copy.copy(self.ctest)\n new.crefs = copy.copy(self.crefs)\n return 
new","function_tokens":["def","copy","(","self",")",":","new","=","CiderScorer","(","n","=","self",".","n",")","new",".","ctest","=","copy",".","copy","(","self",".","ctest",")","new",".","crefs","=","copy",".","copy","(","self",".","crefs",")","return","new"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py#L51-L56"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py","language":"python","identifier":"CiderScorer.__init__","parameters":"(self, test=None, refs=None, n=4, sigma=6.0)","argument_list":"","return_statement":"","docstring":"singular instance","docstring_summary":"singular instance","docstring_tokens":["singular","instance"],"function":"def __init__(self, test=None, refs=None, n=4, sigma=6.0):\n ''' singular instance '''\n self.n = n\n self.sigma = sigma\n self.crefs = []\n self.ctest = []\n self.document_frequency = defaultdict(float)\n self.cook_append(test, refs)\n self.ref_len = None","function_tokens":["def","__init__","(","self",",","test","=","None",",","refs","=","None",",","n","=","4",",","sigma","=","6.0",")",":","self",".","n","=","n","self",".","sigma","=","sigma","self",".","crefs","=","[","]","self",".","ctest","=","[","]","self",".","document_frequency","=","defaultdict","(","float",")","self",".","cook_append","(","test",",","refs",")","self",".","ref_len","=","None"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py#L58-L66"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py","language":"python","identifier":"CiderScorer.cook_append","parameters":"(self, test, refs)","argument_list":"","return_statement":"","docstring":"called by constructor and __iadd__ to avoid creating new instances.","docstring_summary":"called by constructor and __iadd__ to avoid creating new instances.","docstring_tokens":["called","by","constructor","and","__iadd__","to","avoid","creating","new","instances","."],"function":"def cook_append(self, test, refs):\n '''called by constructor and __iadd__ to avoid creating new instances.'''\n\n if refs is not None:\n self.crefs.append(cook_refs(refs))\n if test is not None:\n self.ctest.append(cook_test(test)) ## N.B.: -1\n else:\n self.ctest.append(None)","function_tokens":["def","cook_append","(","self",",","test",",","refs",")",":","if","refs","is","not","None",":","self",".","crefs",".","append","(","cook_refs","(","refs",")",")","if","test","is","not","None",":","self",".","ctest",".","append","(","cook_test","(","test",")",")","## N.B.: -1","else",":","self",".","ctest",".","append","(","None",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py#L68-L76"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py","language":"python","identifier":"CiderScorer.__iadd__","parameters":"(self, other)","argument_list":"","return_statement":"return self","docstring":"add an instance 
(e.g., from another sentence).","docstring_summary":"add an instance (e.g., from another sentence).","docstring_tokens":["add","an","instance","(","e",".","g",".","from","another","sentence",")","."],"function":"def __iadd__(self, other):\n '''add an instance (e.g., from another sentence).'''\n\n if type(other) is tuple:\n ## avoid creating new CiderScorer instances\n self.cook_append(other[0], other[1])\n else:\n self.ctest.extend(other.ctest)\n self.crefs.extend(other.crefs)\n\n return self","function_tokens":["def","__iadd__","(","self",",","other",")",":","if","type","(","other",")","is","tuple",":","## avoid creating new CiderScorer instances","self",".","cook_append","(","other","[","0","]",",","other","[","1","]",")","else",":","self",".","ctest",".","extend","(","other",".","ctest",")","self",".","crefs",".","extend","(","other",".","crefs",")","return","self"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py#L82-L92"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py","language":"python","identifier":"CiderScorer.compute_doc_freq","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Compute term frequency for reference data.\n This will be used to compute idf (inverse document frequency later)\n The term frequency is stored in the object\n :return: None","docstring_summary":"Compute term frequency for reference data.\n This will be used to compute idf (inverse document frequency later)\n The term frequency is stored in the object\n :return: None","docstring_tokens":["Compute","term","frequency","for","reference","data",".","This","will","be","used","to","compute","idf","(","inverse","document","frequency","later",")","The","term","frequency","is","stored","in","the","object",":","return",":","None"],"function":"def compute_doc_freq(self):\n '''\n Compute term frequency for reference data.\n This will be used to compute idf (inverse document frequency later)\n The term frequency is stored in the object\n :return: None\n '''\n for refs in self.crefs:\n # refs, k ref captions of one image\n for ngram in set([ngram for ref in refs for (ngram,count) in ref.iteritems()]):\n self.document_frequency[ngram] += 1","function_tokens":["def","compute_doc_freq","(","self",")",":","for","refs","in","self",".","crefs",":","# refs, k ref captions of one image","for","ngram","in","set","(","[","ngram","for","ref","in","refs","for","(","ngram",",","count",")","in","ref",".","iteritems","(",")","]",")",":","self",".","document_frequency","[","ngram","]","+=","1"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider_scorer.py#L93-L103"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider.py","language":"python","identifier":"Cider.compute_score","parameters":"(self, gts, res)","argument_list":"","return_statement":"return score, scores","docstring":"Main function to compute CIDEr score\n :param hypo_for_image (dict) : dictionary with key and value \n ref_for_image (dict) : dictionary with key and value \n :return: cider (float) : computed CIDEr score for the 
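
compute_doc_freq counts, for each n-gram, how many images' reference sets contain it, counting each image at most once; CIDEr then conventionally weights term frequencies by log(N/df), though that step is not shown in this section and appears below only as the standard definition, not as code from this file.

from collections import defaultdict
from math import log

crefs = [
    [{"a": 1, "cat": 2}, {"a": 1}],   # image 1: two cooked references (toy n-gram counts)
    [{"a": 3, "dog": 1}],             # image 2: one cooked reference
]

document_frequency = defaultdict(float)
for refs in crefs:
    for ngram in set(ng for ref in refs for ng in ref):   # dedupe within one image
        document_frequency[ngram] += 1

idf = log(len(crefs)) - log(document_frequency["a"])      # conventional CIDEr idf weight
print(document_frequency["a"], idf)                       # -> 2.0 0.0
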
corpus","docstring_summary":"Main function to compute CIDEr score\n :param hypo_for_image (dict) : dictionary with key and value \n ref_for_image (dict) : dictionary with key and value \n :return: cider (float) : computed CIDEr score for the corpus","docstring_tokens":["Main","function","to","compute","CIDEr","score",":","param","hypo_for_image","(","dict",")",":","dictionary","with","key","","and","value","","ref_for_image","(","dict",")",":","dictionary","with","key","","and","value","",":","return",":","cider","(","float",")",":","computed","CIDEr","score","for","the","corpus"],"function":"def compute_score(self, gts, res):\n \"\"\"\n Main function to compute CIDEr score\n :param hypo_for_image (dict) : dictionary with key and value \n ref_for_image (dict) : dictionary with key and value \n :return: cider (float) : computed CIDEr score for the corpus \n \"\"\"\n\n assert(gts.keys() == res.keys())\n imgIds = gts.keys()\n\n cider_scorer = CiderScorer(n=self._n, sigma=self._sigma)\n\n for id in imgIds:\n hypo = res[id]\n ref = gts[id]\n\n # Sanity check.\n assert(type(hypo) is list)\n assert(len(hypo) == 1)\n assert(type(ref) is list)\n assert(len(ref) > 0)\n\n cider_scorer += (hypo[0], ref)\n\n (score, scores) = cider_scorer.compute_score()\n\n return score, scores","function_tokens":["def","compute_score","(","self",",","gts",",","res",")",":","assert","(","gts",".","keys","(",")","==","res",".","keys","(",")",")","imgIds","=","gts",".","keys","(",")","cider_scorer","=","CiderScorer","(","n","=","self",".","_n",",","sigma","=","self",".","_sigma",")","for","id","in","imgIds",":","hypo","=","res","[","id","]","ref","=","gts","[","id","]","# Sanity check.","assert","(","type","(","hypo",")","is","list",")","assert","(","len","(","hypo",")","==","1",")","assert","(","type","(","ref",")","is","list",")","assert","(","len","(","ref",")",">","0",")","cider_scorer","+=","(","hypo","[","0","]",",","ref",")","(","score",",","scores",")","=","cider_scorer",".","compute_score","(",")","return","score",",","scores"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/cider\/cider.py#L24-L51"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"precook","parameters":"(s, n=4, out=False)","argument_list":"","return_statement":"return (len(words), counts)","docstring":"Takes a string as input and returns an object that can be given to\n either cook_refs or cook_test. This is optional: cook_refs and cook_test\n can take string arguments as well.","docstring_summary":"Takes a string as input and returns an object that can be given to\n either cook_refs or cook_test. This is optional: cook_refs and cook_test\n can take string arguments as well.","docstring_tokens":["Takes","a","string","as","input","and","returns","an","object","that","can","be","given","to","either","cook_refs","or","cook_test",".","This","is","optional",":","cook_refs","and","cook_test","can","take","string","arguments","as","well","."],"function":"def precook(s, n=4, out=False):\n \"\"\"Takes a string as input and returns an object that can be given to\n either cook_refs or cook_test. 
This is optional: cook_refs and cook_test\n can take string arguments as well.\"\"\"\n words = s.split()\n counts = defaultdict(int)\n for k in xrange(1,n+1):\n for i in xrange(len(words)-k+1):\n ngram = tuple(words[i:i+k])\n counts[ngram] += 1\n return (len(words), counts)","function_tokens":["def","precook","(","s",",","n","=","4",",","out","=","False",")",":","words","=","s",".","split","(",")","counts","=","defaultdict","(","int",")","for","k","in","xrange","(","1",",","n","+","1",")",":","for","i","in","xrange","(","len","(","words",")","-","k","+","1",")",":","ngram","=","tuple","(","words","[","i",":","i","+","k","]",")","counts","[","ngram","]","+=","1","return","(","len","(","words",")",",","counts",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L23-L33"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"cook_refs","parameters":"(refs, eff=None, n=4)","argument_list":"","return_statement":"return (reflen, maxcounts)","docstring":"Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.","docstring_summary":"Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.","docstring_tokens":["Takes","a","list","of","reference","sentences","for","a","single","segment","and","returns","an","object","that","encapsulates","everything","that","BLEU","needs","to","know","about","them","."],"function":"def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with \"average\"\n '''Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.'''\n\n reflen = []\n maxcounts = {}\n for ref in refs:\n rl, counts = precook(ref, n)\n reflen.append(rl)\n for (ngram,count) in counts.iteritems():\n maxcounts[ngram] = max(maxcounts.get(ngram,0), count)\n\n # Calculate effective reference sentence length.\n if eff == \"shortest\":\n reflen = min(reflen)\n elif eff == \"average\":\n reflen = float(sum(reflen))\/len(reflen)\n\n ## lhuang: N.B.: leave reflen computaiton to the very end!!\n \n ## lhuang: N.B.: in case of \"closest\", keep a list of reflens!! (bad design)\n\n return (reflen, maxcounts)","function_tokens":["def","cook_refs","(","refs",",","eff","=","None",",","n","=","4",")",":","## lhuang: oracle will call with \"average\"","reflen","=","[","]","maxcounts","=","{","}","for","ref","in","refs",":","rl",",","counts","=","precook","(","ref",",","n",")","reflen",".","append","(","rl",")","for","(","ngram",",","count",")","in","counts",".","iteritems","(",")",":","maxcounts","[","ngram","]","=","max","(","maxcounts",".","get","(","ngram",",","0",")",",","count",")","# Calculate effective reference sentence length.","if","eff","==","\"shortest\"",":","reflen","=","min","(","reflen",")","elif","eff","==","\"average\"",":","reflen","=","float","(","sum","(","reflen",")",")","\/","len","(","reflen",")","## lhuang: N.B.: leave reflen computaiton to the very end!!","## lhuang: N.B.: in case of \"closest\", keep a list of reflens!! 
(bad design)","return","(","reflen",",","maxcounts",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L35-L58"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"cook_test","parameters":"(test, (reflen, refmaxcounts), eff=None, n=4)","argument_list":"","return_statement":"return result","docstring":"Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.","docstring_summary":"Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.","docstring_tokens":["Takes","a","test","sentence","and","returns","an","object","that","encapsulates","everything","that","BLEU","needs","to","know","about","it","."],"function":"def cook_test(test, (reflen, refmaxcounts), eff=None, n=4):\n '''Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.'''\n\n testlen, counts = precook(test, n, True)\n\n result = {}\n\n # Calculate effective reference sentence length.\n \n if eff == \"closest\":\n result[\"reflen\"] = min((abs(l-testlen), l) for l in reflen)[1]\n else: ## i.e., \"average\" or \"shortest\" or None\n result[\"reflen\"] = reflen\n\n result[\"testlen\"] = testlen\n\n result[\"guess\"] = [max(0,testlen-k+1) for k in xrange(1,n+1)]\n\n result['correct'] = [0]*n\n for (ngram, count) in counts.iteritems():\n result[\"correct\"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)\n\n return result","function_tokens":["def","cook_test","(","test",",","(","reflen",",","refmaxcounts",")",",","eff","=","None",",","n","=","4",")",":","testlen",",","counts","=","precook","(","test",",","n",",","True",")","result","=","{","}","# Calculate effective reference sentence length.","if","eff","==","\"closest\"",":","result","[","\"reflen\"","]","=","min","(","(","abs","(","l","-","testlen",")",",","l",")","for","l","in","reflen",")","[","1","]","else",":","## i.e., \"average\" or \"shortest\" or None","result","[","\"reflen\"","]","=","reflen","result","[","\"testlen\"","]","=","testlen","result","[","\"guess\"","]","=","[","max","(","0",",","testlen","-","k","+","1",")","for","k","in","xrange","(","1",",","n","+","1",")","]","result","[","'correct'","]","=","[","0","]","*","n","for","(","ngram",",","count",")","in","counts",".","iteritems","(",")",":","result","[","\"correct\"","]","[","len","(","ngram",")","-","1","]","+=","min","(","refmaxcounts",".","get","(","ngram",",","0",")",",","count",")","return","result"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L60-L83"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"BleuScorer.copy","parameters":"(self)","argument_list":"","return_statement":"return new","docstring":"copy the refs.","docstring_summary":"copy the refs.","docstring_tokens":["copy","the","refs","."],"function":"def copy(self):\n ''' copy the refs.'''\n new = BleuScorer(n=self.n)\n new.ctest = copy.copy(self.ctest)\n new.crefs = copy.copy(self.crefs)\n 
new._score = None\n return new","function_tokens":["def","copy","(","self",")",":","new","=","BleuScorer","(","n","=","self",".","n",")","new",".","ctest","=","copy",".","copy","(","self",".","ctest",")","new",".","crefs","=","copy",".","copy","(","self",".","crefs",")","new",".","_score","=","None","return","new"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L92-L98"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"BleuScorer.__init__","parameters":"(self, test=None, refs=None, n=4, special_reflen=None)","argument_list":"","return_statement":"","docstring":"singular instance","docstring_summary":"singular instance","docstring_tokens":["singular","instance"],"function":"def __init__(self, test=None, refs=None, n=4, special_reflen=None):\n ''' singular instance '''\n\n self.n = n\n self.crefs = []\n self.ctest = []\n self.cook_append(test, refs)\n self.special_reflen = special_reflen","function_tokens":["def","__init__","(","self",",","test","=","None",",","refs","=","None",",","n","=","4",",","special_reflen","=","None",")",":","self",".","n","=","n","self",".","crefs","=","[","]","self",".","ctest","=","[","]","self",".","cook_append","(","test",",","refs",")","self",".","special_reflen","=","special_reflen"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L100-L107"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"BleuScorer.cook_append","parameters":"(self, test, refs)","argument_list":"","return_statement":"","docstring":"called by constructor and __iadd__ to avoid creating new instances.","docstring_summary":"called by constructor and __iadd__ to avoid creating new instances.","docstring_tokens":["called","by","constructor","and","__iadd__","to","avoid","creating","new","instances","."],"function":"def cook_append(self, test, refs):\n '''called by constructor and __iadd__ to avoid creating new instances.'''\n \n if refs is not None:\n self.crefs.append(cook_refs(refs))\n if test is not None:\n cooked_test = cook_test(test, self.crefs[-1])\n self.ctest.append(cooked_test) ## N.B.: -1\n else:\n self.ctest.append(None) # lens of crefs and ctest have to match\n\n self._score = None","function_tokens":["def","cook_append","(","self",",","test",",","refs",")",":","if","refs","is","not","None",":","self",".","crefs",".","append","(","cook_refs","(","refs",")",")","if","test","is","not","None",":","cooked_test","=","cook_test","(","test",",","self",".","crefs","[","-","1","]",")","self",".","ctest",".","append","(","cooked_test",")","## N.B.: -1","else",":","self",".","ctest",".","append","(","None",")","# lens of crefs and ctest have to match","self",".","_score","=","None"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L109-L120"} 
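
These scorer classes accumulate corpus statistics through +=: a (hypothesis, references) tuple is routed to cook_append, so crefs and ctest grow in lockstep and the cached _score is invalidated. A hypothetical usage sketch; the import path is an assumption, and since bleu_scorer.py is Python 2 code (tuple parameters, xrange), it has to run under Python 2.

from bleu_scorer import BleuScorer   # assumed importable from this module's directory

scorer = BleuScorer(n=4)
scorer += ("a cat sits on the mat", ["a cat is sitting on the mat"])
scorer += ("a dog runs", ["the dog runs fast", "a dog is running"])
# compute_score() (the call used by rescore above) then yields the corpus BLEU-1..4
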
{"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"BleuScorer.score_ratio","parameters":"(self, option=None)","argument_list":"","return_statement":"return (self.fscore(option=option), self.ratio(option=option))","docstring":"return (bleu, len_ratio) pair","docstring_summary":"return (bleu, len_ratio) pair","docstring_tokens":["return","(","bleu","len_ratio",")","pair"],"function":"def score_ratio(self, option=None):\n '''return (bleu, len_ratio) pair'''\n return (self.fscore(option=option), self.ratio(option=option))","function_tokens":["def","score_ratio","(","self",",","option","=","None",")",":","return","(","self",".","fscore","(","option","=","option",")",",","self",".","ratio","(","option","=","option",")",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L126-L128"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"BleuScorer.rescore","parameters":"(self, new_test)","argument_list":"","return_statement":"return self.retest(new_test).compute_score()","docstring":"replace test(s) with new test(s), and returns the new score.","docstring_summary":"replace test(s) with new test(s), and returns the new score.","docstring_tokens":["replace","test","(","s",")","with","new","test","(","s",")","and","returns","the","new","score","."],"function":"def rescore(self, new_test):\n ''' replace test(s) with new test(s), and returns the new score.'''\n \n return self.retest(new_test).compute_score()","function_tokens":["def","rescore","(","self",",","new_test",")",":","return","self",".","retest","(","new_test",")",".","compute_score","(",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L152-L155"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py","language":"python","identifier":"BleuScorer.__iadd__","parameters":"(self, other)","argument_list":"","return_statement":"return self","docstring":"add an instance (e.g., from another sentence).","docstring_summary":"add an instance (e.g., from another sentence).","docstring_tokens":["add","an","instance","(","e",".","g",".","from","another","sentence",")","."],"function":"def __iadd__(self, other):\n '''add an instance (e.g., from another sentence).'''\n\n if type(other) is tuple:\n ## avoid creating new BleuScorer instances\n self.cook_append(other[0], other[1])\n else:\n assert self.compatible(other), \"incompatible BLEUs.\"\n self.ctest.extend(other.ctest)\n self.crefs.extend(other.crefs)\n self._score = None ## need to recompute\n\n return self","function_tokens":["def","__iadd__","(","self",",","other",")",":","if","type","(","other",")","is","tuple",":","## avoid creating new BleuScorer instances","self",".","cook_append","(","other","[","0","]",",","other","[","1","]",")","else",":","assert","self",".","compatible","(","other",")",",","\"incompatible 
BLEUs.\"","self",".","ctest",".","extend","(","other",".","ctest",")","self",".","crefs",".","extend","(","other",".","crefs",")","self",".","_score","=","None","## need to recompute","return","self"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/bleu\/bleu_scorer.py#L161-L173"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/rouge\/rouge.py","language":"python","identifier":"my_lcs","parameters":"(string, sub)","argument_list":"","return_statement":"return lengths[len(string)][len(sub)]","docstring":"Calculates longest common subsequence for a pair of tokenized strings\n :param string : list of str : tokens from a string split using whitespace\n :param sub : list of str : shorter string, also split using whitespace\n :returns: length (list of int): length of the longest common subsequence between the two strings\n\n Note: my_lcs only gives length of the longest common subsequence, not the actual LCS","docstring_summary":"Calculates longest common subsequence for a pair of tokenized strings\n :param string : list of str : tokens from a string split using whitespace\n :param sub : list of str : shorter string, also split using whitespace\n :returns: length (list of int): length of the longest common subsequence between the two strings","docstring_tokens":["Calculates","longest","common","subsequence","for","a","pair","of","tokenized","strings",":","param","string",":","list","of","str",":","tokens","from","a","string","split","using","whitespace",":","param","sub",":","list","of","str",":","shorter","string","also","split","using","whitespace",":","returns",":","length","(","list","of","int",")",":","length","of","the","longest","common","subsequence","between","the","two","strings"],"function":"def my_lcs(string, sub):\n \"\"\"\n Calculates longest common subsequence for a pair of tokenized strings\n :param string : list of str : tokens from a string split using whitespace\n :param sub : list of str : shorter string, also split using whitespace\n :returns: length (list of int): length of the longest common subsequence between the two strings\n\n Note: my_lcs only gives length of the longest common subsequence, not the actual LCS\n \"\"\"\n if(len(string)< len(sub)):\n sub, string = string, sub\n\n lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]\n\n for j in range(1,len(sub)+1):\n for i in range(1,len(string)+1):\n if(string[i-1] == sub[j-1]):\n lengths[i][j] = lengths[i-1][j-1] + 1\n else:\n lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])\n\n return 
lengths[len(string)][len(sub)]","function_tokens":["def","my_lcs","(","string",",","sub",")",":","if","(","len","(","string",")","<","len","(","sub",")",")",":","sub",",","string","=","string",",","sub","lengths","=","[","[","0","for","i","in","range","(","0",",","len","(","sub",")","+","1",")","]","for","j","in","range","(","0",",","len","(","string",")","+","1",")","]","for","j","in","range","(","1",",","len","(","sub",")","+","1",")",":","for","i","in","range","(","1",",","len","(","string",")","+","1",")",":","if","(","string","[","i","-","1","]","==","sub","[","j","-","1","]",")",":","lengths","[","i","]","[","j","]","=","lengths","[","i","-","1","]","[","j","-","1","]","+","1","else",":","lengths","[","i","]","[","j","]","=","max","(","lengths","[","i","-","1","]","[","j","]",",","lengths","[","i","]","[","j","-","1","]",")","return","lengths","[","len","(","string",")","]","[","len","(","sub",")","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/rouge\/rouge.py#L13-L34"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/rouge\/rouge.py","language":"python","identifier":"Rouge.calc_score","parameters":"(self, candidate, refs)","argument_list":"","return_statement":"return score","docstring":"Compute ROUGE-L score given one candidate and references for an image\n :param candidate: str : candidate sentence to be evaluated\n :param refs: list of str : COCO reference sentences for the particular image to be evaluated\n :returns score: int (ROUGE-L score for the candidate evaluated against references)","docstring_summary":"Compute ROUGE-L score given one candidate and references for an image\n :param candidate: str : candidate sentence to be evaluated\n :param refs: list of str : COCO reference sentences for the particular image to be evaluated\n :returns score: int (ROUGE-L score for the candidate evaluated against references)","docstring_tokens":["Compute","ROUGE","-","L","score","given","one","candidate","and","references","for","an","image",":","param","candidate",":","str",":","candidate","sentence","to","be","evaluated",":","param","refs",":","list","of","str",":","COCO","reference","sentences","for","the","particular","image","to","be","evaluated",":","returns","score",":","int","(","ROUGE","-","L","score","for","the","candidate","evaluated","against","references",")"],"function":"def calc_score(self, candidate, refs):\n \"\"\"\n Compute ROUGE-L score given one candidate and references for an image\n :param candidate: str : candidate sentence to be evaluated\n :param refs: list of str : COCO reference sentences for the particular image to be evaluated\n :returns score: int (ROUGE-L score for the candidate evaluated against references)\n \"\"\"\n assert(len(candidate)==1)\t\n assert(len(refs)>0) \n prec = []\n rec = []\n\n # split into tokens\n token_c = candidate[0].split(\" \")\n \t\n for reference in refs:\n # split into tokens\n token_r = reference.split(\" \")\n # compute the longest common subsequence\n lcs = my_lcs(token_r, token_c)\n prec.append(lcs\/float(len(token_c)))\n rec.append(lcs\/float(len(token_r)))\n\n prec_max = max(prec)\n rec_max = max(rec)\n\n if(prec_max!=0 and rec_max !=0):\n score = ((1 + self.beta**2)*prec_max*rec_max)\/float(rec_max + self.beta**2*prec_max)\n else:\n score = 0.0\n return 
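
ROUGE-L as implemented by my_lcs and calc_score combines an LCS length with a recall-weighted F-measure: precision = lcs/len(candidate), recall = lcs/len(reference), taking the best of each over the references. A self-contained sketch; beta = 1.2 mirrors the usual default of this module and is an assumption here, since the constructor is not shown in this section.

def lcs_len(a, b):
    # classic O(len(a) * len(b)) dynamic program over token lists
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            dp[i][j] = dp[i-1][j-1] + 1 if a[i-1] == b[j-1] else max(dp[i-1][j], dp[i][j-1])
    return dp[len(a)][len(b)]

cand = "the cat sat on the mat".split()
refs = ["the cat is on the mat".split()]
beta = 1.2

p = max(lcs_len(ref, cand) / float(len(cand)) for ref in refs)
r = max(lcs_len(ref, cand) / float(len(ref)) for ref in refs)
score = ((1 + beta**2) * p * r) / (r + beta**2 * p) if p and r else 0.0
print(score)   # -> 0.833...: the LCS 'the cat on the mat' has length 5
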
score","function_tokens":["def","calc_score","(","self",",","candidate",",","refs",")",":","assert","(","len","(","candidate",")","==","1",")","assert","(","len","(","refs",")",">","0",")","prec","=","[","]","rec","=","[","]","# split into tokens","token_c","=","candidate","[","0","]",".","split","(","\" \"",")","for","reference","in","refs",":","# split into tokens","token_r","=","reference",".","split","(","\" \"",")","# compute the longest common subsequence","lcs","=","my_lcs","(","token_r",",","token_c",")","prec",".","append","(","lcs","\/","float","(","len","(","token_c",")",")",")","rec",".","append","(","lcs","\/","float","(","len","(","token_r",")",")",")","prec_max","=","max","(","prec",")","rec_max","=","max","(","rec",")","if","(","prec_max","!=","0","and","rec_max","!=","0",")",":","score","=","(","(","1","+","self",".","beta","**","2",")","*","prec_max","*","rec_max",")","\/","float","(","rec_max","+","self",".","beta","**","2","*","prec_max",")","else",":","score","=","0.0","return","score"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/rouge\/rouge.py#L45-L75"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/rouge\/rouge.py","language":"python","identifier":"Rouge.compute_score","parameters":"(self, gts, res)","argument_list":"","return_statement":"return average_score, np.array(score)","docstring":"Computes Rouge-L score given a set of reference and candidate sentences for the dataset\n Invoked by evaluate_captions.py \n :param hypo_for_image: dict : candidate \/ test sentences with \"image name\" key and \"tokenized sentences\" as values \n :param ref_for_image: dict : reference MS-COCO sentences with \"image name\" key and \"tokenized sentences\" as values\n :returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)","docstring_summary":"Computes Rouge-L score given a set of reference and candidate sentences for the dataset\n Invoked by evaluate_captions.py \n :param hypo_for_image: dict : candidate \/ test sentences with \"image name\" key and \"tokenized sentences\" as values \n :param ref_for_image: dict : reference MS-COCO sentences with \"image name\" key and \"tokenized sentences\" as values\n :returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)","docstring_tokens":["Computes","Rouge","-","L","score","given","a","set","of","reference","and","candidate","sentences","for","the","dataset","Invoked","by","evaluate_captions",".","py",":","param","hypo_for_image",":","dict",":","candidate","\/","test","sentences","with","image","name","key","and","tokenized","sentences","as","values",":","param","ref_for_image",":","dict",":","reference","MS","-","COCO","sentences","with","image","name","key","and","tokenized","sentences","as","values",":","returns",":","average_score",":","float","(","mean","ROUGE","-","L","score","computed","by","averaging","scores","for","all","the","images",")"],"function":"def compute_score(self, gts, res):\n \"\"\"\n Computes Rouge-L score given a set of reference and candidate sentences for the dataset\n Invoked by evaluate_captions.py \n :param hypo_for_image: dict : candidate \/ test sentences with \"image name\" key and \"tokenized sentences\" as values \n :param ref_for_image: dict : reference MS-COCO sentences with \"image name\" key and 
\"tokenized sentences\" as values\n :returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)\n \"\"\"\n assert(gts.keys() == res.keys())\n imgIds = gts.keys()\n\n score = []\n for id in imgIds:\n hypo = res[id]\n ref = gts[id]\n\n score.append(self.calc_score(hypo, ref))\n\n # Sanity check.\n assert(type(hypo) is list)\n assert(len(hypo) == 1)\n assert(type(ref) is list)\n assert(len(ref) > 0)\n\n average_score = np.mean(np.array(score))\n return average_score, np.array(score)","function_tokens":["def","compute_score","(","self",",","gts",",","res",")",":","assert","(","gts",".","keys","(",")","==","res",".","keys","(",")",")","imgIds","=","gts",".","keys","(",")","score","=","[","]","for","id","in","imgIds",":","hypo","=","res","[","id","]","ref","=","gts","[","id","]","score",".","append","(","self",".","calc_score","(","hypo",",","ref",")",")","# Sanity check.","assert","(","type","(","hypo",")","is","list",")","assert","(","len","(","hypo",")","==","1",")","assert","(","type","(","ref",")","is","list",")","assert","(","len","(","ref",")",">","0",")","average_score","=","np",".","mean","(","np",".","array","(","score",")",")","return","average_score",",","np",".","array","(","score",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxevalcap\/rouge\/rouge.py#L77-L102"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.__init__","parameters":"(self, annotation_file=None)","argument_list":"","return_statement":"","docstring":"Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:","docstring_summary":"Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:","docstring_tokens":["Constructor","of","Microsoft","COCO","helper","class","for","reading","and","visualizing","annotations",".",":","param","annotation_file","(","str",")",":","location","of","annotation","file",":","param","image_folder","(","str",")",":","location","to","the","folder","that","hosts","images",".",":","return",":"],"function":"def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:\n \"\"\"\n # load dataset\n self.dataset = {}\n self.anns = []\n self.imgToAnns = {}\n self.catToImgs = {}\n self.imgs = []\n self.cats = []\n self.image2hash = {}\n if not annotation_file == None:\n print('loading annotations into memory...')\n time_t = datetime.datetime.utcnow()\n dataset = json.load(open(annotation_file, 'r'))\n\n print( datetime.datetime.utcnow() - time_t)\n self.dataset = dataset\n self.createIndex()","function_tokens":["def","__init__","(","self",",","annotation_file","=","None",")",":","# load 
dataset","self",".","dataset","=","{","}","self",".","anns","=","[","]","self",".","imgToAnns","=","{","}","self",".","catToImgs","=","{","}","self",".","imgs","=","[","]","self",".","cats","=","[","]","self",".","image2hash","=","{","}","if","not","annotation_file","==","None",":","print","(","'loading annotations into memory...'",")","time_t","=","datetime",".","datetime",".","utcnow","(",")","dataset","=","json",".","load","(","open","(","annotation_file",",","'r'",")",")","print","(","datetime",".","datetime",".","utcnow","(",")","-","time_t",")","self",".","dataset","=","dataset","self",".","createIndex","(",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L65-L87"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.info","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Print information about the annotation file.\n :return:","docstring_summary":"Print information about the annotation file.\n :return:","docstring_tokens":["Print","information","about","the","annotation","file",".",":","return",":"],"function":"def info(self):\n \"\"\"\n Print information about the annotation file.\n :return:\n \"\"\"\n for key, value in self.datset['info'].items():\n print( '%s: %s'%(key, value))","function_tokens":["def","info","(","self",")",":","for","key",",","value","in","self",".","datset","[","'info'","]",".","items","(",")",":","print","(","'%s: %s'","%","(","key",",","value",")",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L129-L135"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.getAnnIds","parameters":"(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None)","argument_list":"","return_statement":"return ids","docstring":"Get ann ids that satisfy given filter conditions. default skips that filter\n :param imgIds (int array) : get anns for given imgs\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids","docstring_summary":"Get ann ids that satisfy given filter conditions. default skips that filter\n :param imgIds (int array) : get anns for given imgs\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. 
[0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids","docstring_tokens":["Get","ann","ids","that","satisfy","given","filter","conditions",".","default","skips","that","filter",":","param","imgIds","(","int","array",")",":","get","anns","for","given","imgs","catIds","(","int","array",")",":","get","anns","for","given","cats","areaRng","(","float","array",")",":","get","anns","for","given","area","range","(","e",".","g",".","[","0","inf","]",")","iscrowd","(","boolean",")",":","get","anns","for","given","crowd","label","(","False","or","True",")",":","return",":","ids","(","int","array",")",":","integer","array","of","ann","ids"],"function":"def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n :param imgIds (int array) : get anns for given imgs\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids\n \"\"\"\n imgIds = imgIds if type(imgIds) == list else [imgIds]\n catIds = catIds if type(catIds) == list else [catIds]\n\n if len(imgIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(imgIds) == 0:\n anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]\n if self.dataset['type'] == 'instances':\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n else:\n ids = [ann['id'] for ann in anns]\n return 
ids","function_tokens":["def","getAnnIds","(","self",",","imgIds","=","[","]",",","catIds","=","[","]",",","areaRng","=","[","]",",","iscrowd","=","None",")",":","imgIds","=","imgIds","if","type","(","imgIds",")","==","list","else","[","imgIds","]","catIds","=","catIds","if","type","(","catIds",")","==","list","else","[","catIds","]","if","len","(","imgIds",")","==","len","(","catIds",")","==","len","(","areaRng",")","==","0",":","anns","=","self",".","dataset","[","'annotations'","]","else",":","if","not","len","(","imgIds",")","==","0",":","anns","=","sum","(","[","self",".","imgToAnns","[","imgId","]","for","imgId","in","imgIds","if","imgId","in","self",".","imgToAnns","]",",","[","]",")","else",":","anns","=","self",".","dataset","[","'annotations'","]","anns","=","anns","if","len","(","catIds",")","==","0","else","[","ann","for","ann","in","anns","if","ann","[","'category_id'","]","in","catIds","]","anns","=","anns","if","len","(","areaRng",")","==","0","else","[","ann","for","ann","in","anns","if","ann","[","'area'","]",">","areaRng","[","0","]","and","ann","[","'area'","]","<","areaRng","[","1","]","]","if","self",".","dataset","[","'type'","]","==","'instances'",":","if","not","iscrowd","==","None",":","ids","=","[","ann","[","'id'","]","for","ann","in","anns","if","ann","[","'iscrowd'","]","==","iscrowd","]","else",":","ids","=","[","ann","[","'id'","]","for","ann","in","anns","]","else",":","ids","=","[","ann","[","'id'","]","for","ann","in","anns","]","return","ids"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L137-L165"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.getCatIds","parameters":"(self, catNms=[], supNms=[], catIds=[])","argument_list":"","return_statement":"return ids","docstring":"filtering parameters. default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids","docstring_summary":"filtering parameters. default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids","docstring_tokens":["filtering","parameters",".","default","skips","that","filter",".",":","param","catNms","(","str","array",")",":","get","cats","for","given","cat","names",":","param","supNms","(","str","array",")",":","get","cats","for","given","supercategory","names",":","param","catIds","(","int","array",")",":","get","cats","for","given","cat","ids",":","return",":","ids","(","int","array",")",":","integer","array","of","cat","ids"],"function":"def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n \"\"\"\n filtering parameters. 
default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n \"\"\"\n catNms = catNms if type(catNms) == list else [catNms]\n supNms = supNms if type(supNms) == list else [supNms]\n catIds = catIds if type(catIds) == list else [catIds]\n\n if len(catNms) == len(supNms) == len(catIds) == 0:\n cats = self.dataset['categories']\n else:\n cats = self.dataset['categories']\n cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]\n cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]\n ids = [cat['id'] for cat in cats]\n return ids","function_tokens":["def","getCatIds","(","self",",","catNms","=","[","]",",","supNms","=","[","]",",","catIds","=","[","]",")",":","catNms","=","catNms","if","type","(","catNms",")","==","list","else","[","catNms","]","supNms","=","supNms","if","type","(","supNms",")","==","list","else","[","supNms","]","catIds","=","catIds","if","type","(","catIds",")","==","list","else","[","catIds","]","if","len","(","catNms",")","==","len","(","supNms",")","==","len","(","catIds",")","==","0",":","cats","=","self",".","dataset","[","'categories'","]","else",":","cats","=","self",".","dataset","[","'categories'","]","cats","=","cats","if","len","(","catNms",")","==","0","else","[","cat","for","cat","in","cats","if","cat","[","'name'","]","in","catNms","]","cats","=","cats","if","len","(","supNms",")","==","0","else","[","cat","for","cat","in","cats","if","cat","[","'supercategory'","]","in","supNms","]","cats","=","cats","if","len","(","catIds",")","==","0","else","[","cat","for","cat","in","cats","if","cat","[","'id'","]","in","catIds","]","ids","=","[","cat","[","'id'","]","for","cat","in","cats","]","return","ids"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L167-L187"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.getImgIds","parameters":"(self, imgIds=[], catIds=[])","argument_list":"","return_statement":"return list(ids)","docstring":"Get img ids that satisfy given filter conditions.\n :param imgIds (int array) : get imgs for given ids\n :param catIds (int array) : get imgs with all given cats\n :return: ids (int array) : integer array of img ids","docstring_summary":"Get img ids that satisfy given filter conditions.\n :param imgIds (int array) : get imgs for given ids\n :param catIds (int array) : get imgs with all given cats\n :return: ids (int array) : integer array of img ids","docstring_tokens":["Get","img","ids","that","satisfy","given","filter","conditions",".",":","param","imgIds","(","int","array",")",":","get","imgs","for","given","ids",":","param","catIds","(","int","array",")",":","get","imgs","with","all","given","cats",":","return",":","ids","(","int","array",")",":","integer","array","of","img","ids"],"function":"def getImgIds(self, imgIds=[], catIds=[]):\n '''\n Get img ids that satisfy given filter conditions.\n :param imgIds (int array) : get imgs for given ids\n :param catIds (int array) : get imgs with all given cats\n :return: 
ids (int array) : integer array of img ids\n '''\n imgIds = imgIds if type(imgIds) == list else [imgIds]\n catIds = catIds if type(catIds) == list else [catIds]\n\n if len(imgIds) == len(catIds) == 0:\n ids = self.imgs.keys()\n else:\n ids = set(imgIds)\n for catId in catIds:\n if len(ids) == 0:\n ids = set(self.catToImgs[catId])\n else:\n ids &= set(self.catToImgs[catId])\n return list(ids)","function_tokens":["def","getImgIds","(","self",",","imgIds","=","[","]",",","catIds","=","[","]",")",":","imgIds","=","imgIds","if","type","(","imgIds",")","==","list","else","[","imgIds","]","catIds","=","catIds","if","type","(","catIds",")","==","list","else","[","catIds","]","if","len","(","imgIds",")","==","len","(","catIds",")","==","0",":","ids","=","self",".","imgs",".","keys","(",")","else",":","ids","=","set","(","imgIds",")","for","catId","in","catIds",":","if","len","(","ids",")","==","0",":","ids","=","set","(","self",".","catToImgs","[","catId","]",")","else",":","ids","&=","set","(","self",".","catToImgs","[","catId","]",")","return","list","(","ids",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L189-L208"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.loadAnns","parameters":"(self, ids=[])","argument_list":"","return_statement":"","docstring":"Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects","docstring_summary":"Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects","docstring_tokens":["Load","anns","with","the","specified","ids",".",":","param","ids","(","int","array",")",":","integer","ids","specifying","anns",":","return",":","anns","(","object","array",")",":","loaded","ann","objects"],"function":"def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if type(ids) == list:\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]","function_tokens":["def","loadAnns","(","self",",","ids","=","[","]",")",":","if","type","(","ids",")","==","list",":","return","[","self",".","anns","[","id","]","for","id","in","ids","]","elif","type","(","ids",")","==","int",":","return","[","self",".","anns","[","ids","]","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L210-L219"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.loadCats","parameters":"(self, ids=[])","argument_list":"","return_statement":"","docstring":"Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects","docstring_summary":"Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat 
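`getCatIds` and `getImgIds` compose naturally: resolve category names to ids, then intersect the image sets of those categories (`getImgIds` intersects across `catIds`, so an image must contain every requested category). A hypothetical chain, continuing the `coco` object from the earlier sketch:

    cat_ids = coco.getCatIds(catNms=['person'])   # names -> category ids
    img_ids = coco.getImgIds(catIds=cat_ids)      # images containing every listed category
    imgs = coco.loadImgs(img_ids)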
objects","docstring_tokens":["Load","cats","with","the","specified","ids",".",":","param","ids","(","int","array",")",":","integer","ids","specifying","cats",":","return",":","cats","(","object","array",")",":","loaded","cat","objects"],"function":"def loadCats(self, ids=[]):\n \"\"\"\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n \"\"\"\n if type(ids) == list:\n return [self.cats[id] for id in ids]\n elif type(ids) == int:\n return [self.cats[ids]]","function_tokens":["def","loadCats","(","self",",","ids","=","[","]",")",":","if","type","(","ids",")","==","list",":","return","[","self",".","cats","[","id","]","for","id","in","ids","]","elif","type","(","ids",")","==","int",":","return","[","self",".","cats","[","ids","]","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L221-L230"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.loadImgs","parameters":"(self, ids=[])","argument_list":"","return_statement":"","docstring":"Load anns with the specified ids.\n :param ids (int array) : integer ids specifying img\n :return: imgs (object array) : loaded img objects","docstring_summary":"Load anns with the specified ids.\n :param ids (int array) : integer ids specifying img\n :return: imgs (object array) : loaded img objects","docstring_tokens":["Load","anns","with","the","specified","ids",".",":","param","ids","(","int","array",")",":","integer","ids","specifying","img",":","return",":","imgs","(","object","array",")",":","loaded","img","objects"],"function":"def loadImgs(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying img\n :return: imgs (object array) : loaded img objects\n \"\"\"\n if type(ids) == list:\n return [self.imgs[id] for id in ids]\n elif type(ids) == int:\n return [self.imgs[ids]]","function_tokens":["def","loadImgs","(","self",",","ids","=","[","]",")",":","if","type","(","ids",")","==","list",":","return","[","self",".","imgs","[","id","]","for","id","in","ids","]","elif","type","(","ids",")","==","int",":","return","[","self",".","imgs","[","ids","]","]"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L232-L241"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.showAnns","parameters":"(self, anns)","argument_list":"","return_statement":"","docstring":"Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None","docstring_summary":"Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None","docstring_tokens":["Display","the","specified","annotations",".",":","param","anns","(","array","of","object",")",":","annotations","to","display",":","return",":","None"],"function":"def showAnns(self, anns):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n if self.dataset['type'] == 
'instances':\n ax = plt.gca()\n polygons = []\n color = []\n for ann in anns:\n c = np.random.random((1, 3)).tolist()[0]\n if type(ann['segmentation']) == list:\n # polygon\n for seg in ann['segmentation']:\n poly = np.array(seg).reshape((len(seg)\/2, 2))\n polygons.append(Polygon(poly, True,alpha=0.4))\n color.append(c)\n else:\n # mask\n mask = COCO.decodeMask(ann['segmentation'])\n img = np.ones( (mask.shape[0], mask.shape[1], 3) )\n if ann['iscrowd'] == 1:\n color_mask = np.array([2.0,166.0,101.0])\/255\n if ann['iscrowd'] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:,:,i] = color_mask[i]\n ax.imshow(np.dstack( (img, mask*0.5) ))\n p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)\n ax.add_collection(p)\n if self.dataset['type'] == 'captions':\n for ann in anns:\n print( ann['caption'])","function_tokens":["def","showAnns","(","self",",","anns",")",":","if","len","(","anns",")","==","0",":","return","0","if","self",".","dataset","[","'type'","]","==","'instances'",":","ax","=","plt",".","gca","(",")","polygons","=","[","]","color","=","[","]","for","ann","in","anns",":","c","=","np",".","random",".","random","(","(","1",",","3",")",")",".","tolist","(",")","[","0","]","if","type","(","ann","[","'segmentation'","]",")","==","list",":","# polygon","for","seg","in","ann","[","'segmentation'","]",":","poly","=","np",".","array","(","seg",")",".","reshape","(","(","len","(","seg",")","\/","2",",","2",")",")","polygons",".","append","(","Polygon","(","poly",",","True",",","alpha","=","0.4",")",")","color",".","append","(","c",")","else",":","# mask","mask","=","COCO",".","decodeMask","(","ann","[","'segmentation'","]",")","img","=","np",".","ones","(","(","mask",".","shape","[","0","]",",","mask",".","shape","[","1","]",",","3",")",")","if","ann","[","'iscrowd'","]","==","1",":","color_mask","=","np",".","array","(","[","2.0",",","166.0",",","101.0","]",")","\/","255","if","ann","[","'iscrowd'","]","==","0",":","color_mask","=","np",".","random",".","random","(","(","1",",","3",")",")",".","tolist","(",")","[","0","]","for","i","in","range","(","3",")",":","img","[",":",",",":",",","i","]","=","color_mask","[","i","]","ax",".","imshow","(","np",".","dstack","(","(","img",",","mask","*","0.5",")",")",")","p","=","PatchCollection","(","polygons",",","facecolors","=","color",",","edgecolors","=","(","0",",","0",",","0",",","1",")",",","linewidths","=","3",",","alpha","=","0.4",")","ax",".","add_collection","(","p",")","if","self",".","dataset","[","'type'","]","==","'captions'",":","for","ann","in","anns",":","print","(","ann","[","'caption'","]",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L243-L278"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.loadRes","parameters":"(self, resFile)","argument_list":"","return_statement":"return res","docstring":"change by ZhengHe\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object","docstring_summary":"change by ZhengHe\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api 
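`showAnns` as shown breaks under Python 3: `np.array(seg).reshape((len(seg)/2, 2))` passes a float dimension, which NumPy rejects. Floor division is presumably intended, since each polygon is a flat [x1, y1, x2, y2, ...] list. (It also returns 0 rather than the documented None when `anns` is empty, and the `plt`, `Polygon`, and `PatchCollection` names come from matplotlib imports elsewhere in coco.py, not shown in this section.)

    # Python 3-safe reshape of a flat polygon into (n_points, 2):
    poly = np.array(seg).reshape((len(seg) // 2, 2))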
object","docstring_tokens":["change","by","ZhengHe","Load","result","file","and","return","a","result","api","object",".",":","param","resFile","(","str",")",":","file","name","of","result","file",":","return",":","res","(","obj",")",":","result","api","object"],"function":"def loadRes(self, resFile):\n \"\"\"\n change by ZhengHe\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object\n \"\"\"\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n res.dataset['info'] = copy.deepcopy(self.dataset['info'])\n res.dataset['type'] = copy.deepcopy(self.dataset['type'])\n res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])\n\n # str to hex int for image_id\n imgdict = {}\n def get_image_dict(img_name):\n # image_hash = int(int(hashlib.sha256(img_name).hexdigest(), 16) % sys.maxint)\n image_hash = self.image2hash[img_name]\n if image_hash in imgdict:\n assert imgdict[image_hash] == img_name, 'hash colision: {0}: {1}'.format(image_hash, img_name)\n else:\n imgdict[image_hash] = img_name\n image_dict = {\"id\": image_hash,\n \"width\": 0,\n \"height\": 0,\n \"file_name\": img_name,\n \"license\": '',\n \"url\": img_name,\n \"date_captured\": '',\n }\n return image_hash\n\n\n print ('Loading and preparing results... ')\n time_t = datetime.datetime.utcnow()\n anns = json.load(open(resFile))\n\n assert type(anns) == list, 'results in not an array of objects'\n\n # annsImgIds = [ann['image_id'] for ann in anns]\n # change by ZhengHe\n annsImgIds = []\n for ann in anns:\n assert ann['image_id'] != '','image_id must have a name'\n assert ann['caption'] != '', 'caption must be a string'\n\n w = jieba.cut(ann['caption'].strip().replace('\u3002',''), cut_all=False)\n p = ' '.join(w)\n ann['caption'] = p\n ann['image_id'] = get_image_dict(ann['image_id'])\n annsImgIds.append((ann['image_id']))\n\n\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n ann['area']=sum(ann['segmentation']['counts'][2:-1:2])\n ann['bbox'] = []\n ann['id'] = id\n ann['iscrowd'] = 0\n print( 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds()))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return 
res","function_tokens":["def","loadRes","(","self",",","resFile",")",":","res","=","COCO","(",")","res",".","dataset","[","'images'","]","=","[","img","for","img","in","self",".","dataset","[","'images'","]","]","res",".","dataset","[","'info'","]","=","copy",".","deepcopy","(","self",".","dataset","[","'info'","]",")","res",".","dataset","[","'type'","]","=","copy",".","deepcopy","(","self",".","dataset","[","'type'","]",")","res",".","dataset","[","'licenses'","]","=","copy",".","deepcopy","(","self",".","dataset","[","'licenses'","]",")","# str to hex int for image_id","imgdict","=","{","}","def","get_image_dict","(","img_name",")",":","# image_hash = int(int(hashlib.sha256(img_name).hexdigest(), 16) % sys.maxint)","image_hash","=","self",".","image2hash","[","img_name","]","if","image_hash","in","imgdict",":","assert","imgdict","[","image_hash","]","==","img_name",",","'hash colision: {0}: {1}'",".","format","(","image_hash",",","img_name",")","else",":","imgdict","[","image_hash","]","=","img_name","image_dict","=","{","\"id\"",":","image_hash",",","\"width\"",":","0",",","\"height\"",":","0",",","\"file_name\"",":","img_name",",","\"license\"",":","''",",","\"url\"",":","img_name",",","\"date_captured\"",":","''",",","}","return","image_hash","print","(","'Loading and preparing results... '",")","time_t","=","datetime",".","datetime",".","utcnow","(",")","anns","=","json",".","load","(","open","(","resFile",")",")","assert","type","(","anns",")","==","list",",","'results in not an array of objects'","# annsImgIds = [ann['image_id'] for ann in anns]","# change by ZhengHe","annsImgIds","=","[","]","for","ann","in","anns",":","assert","ann","[","'image_id'","]","!=","''",",","'image_id must have a name'","assert","ann","[","'caption'","]","!=","''",",","'caption must be a string'","w","=","jieba",".","cut","(","ann","[","'caption'","]",".","strip","(",")",".","replace","(","'\u3002','","'","),"," ","c","t_all=F","a","lse)","","p","=","' '",".","join","(","w",")","ann","[","'caption'","]","=","p","ann","[","'image_id'","]","=","get_image_dict","(","ann","[","'image_id'","]",")","annsImgIds",".","append","(","(","ann","[","'image_id'","]",")",")","assert","set","(","annsImgIds",")","==","(","set","(","annsImgIds",")","&","set","(","self",".","getImgIds","(",")",")",")",",","'Results do not correspond to current coco 
set'","if","'caption'","in","anns","[","0","]",":","imgIds","=","set","(","[","img","[","'id'","]","for","img","in","res",".","dataset","[","'images'","]","]",")","&","set","(","[","ann","[","'image_id'","]","for","ann","in","anns","]",")","res",".","dataset","[","'images'","]","=","[","img","for","img","in","res",".","dataset","[","'images'","]","if","img","[","'id'","]","in","imgIds","]","for","id",",","ann","in","enumerate","(","anns",")",":","ann","[","'id'","]","=","id","elif","'bbox'","in","anns","[","0","]","and","not","anns","[","0","]","[","'bbox'","]","==","[","]",":","res",".","dataset","[","'categories'","]","=","copy",".","deepcopy","(","self",".","dataset","[","'categories'","]",")","for","id",",","ann","in","enumerate","(","anns",")",":","bb","=","ann","[","'bbox'","]","x1",",","x2",",","y1",",","y2","=","[","bb","[","0","]",",","bb","[","0","]","+","bb","[","2","]",",","bb","[","1","]",",","bb","[","1","]","+","bb","[","3","]","]","ann","[","'segmentation'","]","=","[","[","x1",",","y1",",","x1",",","y2",",","x2",",","y2",",","x2",",","y1","]","]","ann","[","'area'","]","=","bb","[","2","]","*","bb","[","3","]","ann","[","'id'","]","=","id","ann","[","'iscrowd'","]","=","0","elif","'segmentation'","in","anns","[","0","]",":","res",".","dataset","[","'categories'","]","=","copy",".","deepcopy","(","self",".","dataset","[","'categories'","]",")","for","id",",","ann","in","enumerate","(","anns",")",":","ann","[","'area'","]","=","sum","(","ann","[","'segmentation'","]","[","'counts'","]","[","2",":","-","1",":","2","]",")","ann","[","'bbox'","]","=","[","]","ann","[","'id'","]","=","id","ann","[","'iscrowd'","]","=","0","print","(","'DONE (t=%0.2fs)'","%","(","(","datetime",".","datetime",".","utcnow","(",")","-","time_t",")",".","total_seconds","(",")",")",")","res",".","dataset","[","'annotations'","]","=","anns","res",".","createIndex","(",")","return","res"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L280-L360"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.decodeMask","parameters":"(R)","argument_list":"","return_statement":"return M.reshape((R['size']), order='F')","docstring":"Decode binary mask M encoded via run-length encoding.\n :param R (object RLE) : run-length encoding of binary mask\n :return: M (bool 2D array) : decoded binary mask","docstring_summary":"Decode binary mask M encoded via run-length encoding.\n :param R (object RLE) : run-length encoding of binary mask\n :return: M (bool 2D array) : decoded binary mask","docstring_tokens":["Decode","binary","mask","M","encoded","via","run","-","length","encoding",".",":","param","R","(","object","RLE",")",":","run","-","length","encoding","of","binary","mask",":","return",":","M","(","bool","2D","array",")",":","decoded","binary","mask"],"function":"def decodeMask(R):\n \"\"\"\n Decode binary mask M encoded via run-length encoding.\n :param R (object RLE) : run-length encoding of binary mask\n :return: M (bool 2D array) : decoded binary mask\n \"\"\"\n N = len(R['counts'])\n M = np.zeros( (R['size'][0]*R['size'][1], ))\n n = 0\n val = 1\n for pos in range(N):\n val = not val\n for c in range(R['counts'][pos]):\n R['counts'][pos]\n M[n] = val\n n += 1\n return M.reshape((R['size']), 
order='F')","function_tokens":["def","decodeMask","(","R",")",":","N","=","len","(","R","[","'counts'","]",")","M","=","np",".","zeros","(","(","R","[","'size'","]","[","0","]","*","R","[","'size'","]","[","1","]",",",")",")","n","=","0","val","=","1","for","pos","in","range","(","N",")",":","val","=","not","val","for","c","in","range","(","R","[","'counts'","]","[","pos","]",")",":","R","[","'counts'","]","[","pos","]","M","[","n","]","=","val","n","+=","1","return","M",".","reshape","(","(","R","[","'size'","]",")",",","order","=","'F'",")"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L364-L380"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.encodeMask","parameters":"(M)","argument_list":"","return_statement":"return {'size': [h, w],\n 'counts': counts_list ,\n }","docstring":"Encode binary mask M using run-length encoding.\n :param M (bool 2D array) : binary mask to encode\n :return: R (object RLE) : run-length encoding of binary mask","docstring_summary":"Encode binary mask M using run-length encoding.\n :param M (bool 2D array) : binary mask to encode\n :return: R (object RLE) : run-length encoding of binary mask","docstring_tokens":["Encode","binary","mask","M","using","run","-","length","encoding",".",":","param","M","(","bool","2D","array",")",":","binary","mask","to","encode",":","return",":","R","(","object","RLE",")",":","run","-","length","encoding","of","binary","mask"],"function":"def encodeMask(M):\n \"\"\"\n Encode binary mask M using run-length encoding.\n :param M (bool 2D array) : binary mask to encode\n :return: R (object RLE) : run-length encoding of binary mask\n \"\"\"\n [h, w] = M.shape\n M = M.flatten(order='F')\n N = len(M)\n counts_list = []\n pos = 0\n # counts\n counts_list.append(1)\n diffs = np.logical_xor(M[0:N-1], M[1:N])\n for diff in diffs:\n if diff:\n pos +=1\n counts_list.append(1)\n else:\n counts_list[pos] += 1\n # if array starts from 1. start with 0 counts for 0\n if M[0] == 1:\n counts_list = [0] + counts_list\n return {'size': [h, w],\n 'counts': counts_list ,\n }","function_tokens":["def","encodeMask","(","M",")",":","[","h",",","w","]","=","M",".","shape","M","=","M",".","flatten","(","order","=","'F'",")","N","=","len","(","M",")","counts_list","=","[","]","pos","=","0","# counts","counts_list",".","append","(","1",")","diffs","=","np",".","logical_xor","(","M","[","0",":","N","-","1","]",",","M","[","1",":","N","]",")","for","diff","in","diffs",":","if","diff",":","pos","+=","1","counts_list",".","append","(","1",")","else",":","counts_list","[","pos","]","+=","1","# if array starts from 1. 
start with 0 counts for 0","if","M","[","0","]","==","1",":","counts_list","=","[","0","]","+","counts_list","return","{","'size'",":","[","h",",","w","]",",","'counts'",":","counts_list",",","}"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L383-L408"} {"nwo":"AIChallenger\/AI_Challenger_2017","sha":"52014e0defbbdd85bf94ab05d308300d5764022f","path":"Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py","language":"python","identifier":"COCO.segToMask","parameters":"( S, h, w )","argument_list":"","return_statement":"return M","docstring":"Convert polygon segmentation to binary mask.\n :param S (float array) : polygon segmentation mask\n :param h (int) : target mask height\n :param w (int) : target mask width\n :return: M (bool 2D array) : binary mask","docstring_summary":"Convert polygon segmentation to binary mask.\n :param S (float array) : polygon segmentation mask\n :param h (int) : target mask height\n :param w (int) : target mask width\n :return: M (bool 2D array) : binary mask","docstring_tokens":["Convert","polygon","segmentation","to","binary","mask",".",":","param","S","(","float","array",")",":","polygon","segmentation","mask",":","param","h","(","int",")",":","target","mask","height",":","param","w","(","int",")",":","target","mask","width",":","return",":","M","(","bool","2D","array",")",":","binary","mask"],"function":"def segToMask( S, h, w ):\n \"\"\"\n Convert polygon segmentation to binary mask.\n :param S (float array) : polygon segmentation mask\n :param h (int) : target mask height\n :param w (int) : target mask width\n :return: M (bool 2D array) : binary mask\n \"\"\"\n M = np.zeros((h,w), dtype=np.bool)\n for s in S:\n N = len(s)\n rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)\n M[rr, cc] = 1\n return M","function_tokens":["def","segToMask","(","S",",","h",",","w",")",":","M","=","np",".","zeros","(","(","h",",","w",")",",","dtype","=","np",".","bool",")","for","s","in","S",":","N","=","len","(","s",")","rr",",","cc","=","polygon","(","np",".","array","(","s","[","1",":","N",":","2","]",")",",","np",".","array","(","s","[","0",":","N",":","2","]",")",")","# (y, x)","M","[","rr",",","cc","]","=","1","return","M"],"url":"https:\/\/github.com\/AIChallenger\/AI_Challenger_2017\/blob\/52014e0defbbdd85bf94ab05d308300d5764022f\/Evaluation\/caption_eval\/coco_caption\/pycxtools\/coco.py#L411-L424"}
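`encodeMask` and `decodeMask` are mutually consistent: because the encoder prepends a 0 count whenever the mask starts with a foreground pixel, the decoder's zeros-first convention round-trips. A sketch, assuming these helpers are staticmethods as in upstream pycocotools (not shown in this section). Note also that `segToMask` uses `np.bool` and skimage's `polygon`; `np.bool` is removed in modern NumPy, where plain `bool` works instead.

    import numpy as np

    M = np.zeros((4, 4), dtype=bool)
    M[1:3, 1:3] = True            # small square of foreground
    R = COCO.encodeMask(M)        # {'size': [4, 4], 'counts': [5, 2, 2, 2, 5]}
    M2 = COCO.decodeMask(R)       # float array of 0.0 / 1.0
    assert (M2.astype(bool) == M).all()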