diff --git a/.gitattributes b/.gitattributes
index c7d9f3332a950355d5a77d85000f05e6f45435ea..11ed3b03004e803a21bc9f30ff5c913f7c909fa7 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -32,3 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jar filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..fa960a207e89d4c8e33c6f28096d1acc55910b26
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,71 @@
+results/
+output_*/
+icl_inference_output/
+.vscode/
+tmp/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+*.ipynb
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..b4f7ee2451c3a2e39e946a33ddaaa04d660180bd
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Yiqin Wang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index 276c9176d6040e1ec61d752991e092f20206ca4f..e5baae608a8729271f37c3b132e23e02abb6af39 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
---
title: ChatVID
-emoji: 🐨
-colorFrom: gray
-colorTo: blue
+emoji: 🎥
+colorFrom: green
+colorTo: red
sdk: gradio
-sdk_version: 3.34.0
+sdk_version: 3.30.0
app_file: app.py
pinned: false
license: mit
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe4fe6b41b3c7782941e50814d3f93929bdd1561
--- /dev/null
+++ b/app.py
@@ -0,0 +1,97 @@
+import argparse
+import time
+
+import gradio as gr
+
+from config.config_utils import get_config
+from model import Captioner, VicunaHandler
+
+
+def set_example_video(example: list) -> dict:
+ return gr.Video.update(value=example[0])
+
+
+def upload_file(files):
+ file_paths = [file.name for file in files]
+ return file_paths
+
+
+def upload_video(video):
+ print(video)
+ return video
+
+
+def respond(input, chat_history):
+ bot_response = handler.gr_chat(input)
+ chat_history.append((input, bot_response))
+ time.sleep(0.1)
+ return "", chat_history
+
+
+def clear_chat(chat_history):
+ handler.chatbot.clear_conv_()
+
+ return "", []
+
+
+
+config = get_config('config/infer.yaml')
+
+captioner = Captioner(config) # global
+
+global handler
+handler = VicunaHandler(config['vicuna'])
+
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+ gr.Markdown("##
ChatVID ")
+ gr.Markdown("""
+ ChatVID is a video chatbot that can chat about any video.
+ """)
+ with gr.Row():
+ with gr.Column():
+ video_path = gr.Video(label="Video")
+
+ with gr.Column():
+ upload_button = gr.Button(
+ "Upload & Watch. (Click once and wait 3min )")
+ chat_button = gr.Button("Let's Chat!", interactive=False)
+ num_frames = gr.Slider(
+ minimum=5,
+ value=12,
+ maximum=12,
+ step=1,
+ label="Number of frames (no more than 12)")
+
+ with gr.Column():
+ chatbot = gr.Chatbot()
+ captions = gr.State("")
+ with gr.Row(visible=False) as input:
+ with gr.Column(scale=0.7):
+ txt = gr.Textbox(
+ show_label=False,
+ placeholder="Enter text and press enter").style(
+ container=False)
+ with gr.Column(scale=0.15, min_width=0):
+ run_button = gr.Button("RUN!")
+ with gr.Column(scale=0.15, min_width=0):
+ clear_button = gr.Button("CLEAR")
+
+ upload_button.click(
+ lambda: gr.update(interactive=False), None, chat_button).then(
+ lambda: gr.update(visible=False), None,
+ input).then(lambda: [], None, chatbot).then(
+ captioner.caption_video, [video_path, num_frames],
+ [captions]).then(lambda: gr.update(interactive=True), None,
+ chat_button)
+
+ chat_button.click(handler.gr_chatbot_init, [captions],
+ None).then(lambda: gr.update(visible=True), None,
+ input)
+
+ txt.submit(respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
+ run_button.click(
+ respond, inputs=[txt, chatbot], outputs=[txt, chatbot])
+ clear_button.click(
+ clear_chat, inputs=[chatbot], outputs=[txt, chatbot])
+
+demo.launch(share=True)
diff --git a/config/config_utils.py b/config/config_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f0d4d9ff58522738e8afcf425454ace3a1886fb
--- /dev/null
+++ b/config/config_utils.py
@@ -0,0 +1,14 @@
+def get_config(
+ config_path: str
+):
+ import yaml
+ f = open(config_path, "r")
+ config = yaml.load(f.read(), yaml.Loader)
+ f.close()
+ return config
+
+def save_config(
+ config: dict,
+ file_path: str,
+):
+ pass
\ No newline at end of file
diff --git a/config/debug.yaml b/config/debug.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0b06e564d8562e4f420a3ef231ba0e2a33dc475f
--- /dev/null
+++ b/config/debug.yaml
@@ -0,0 +1,23 @@
+device: 'cuda'
+video_path: '/mnt/petrelfs/wangyiqin/vid_cap/examples/videos/'
+video_name: 'cook_720p.mp4'
+fps: 120
+
+vicuna:
+ model_path: '/mnt/petrelfs/wangyiqin/vid_cap/vicuna-7b'
+ device: 'cuda'
+ num_gpus: 1
+ max_gpu_memory: '40Gib'
+ load_8bit: True
+ conv_template:
+ temperature: 1.0
+ max_new_tokens: 512
+ debug: False
+ output_path: '/mnt/petrelfs/wangyiqin/vid_cap/VideoChatDuplicate/examples/test.json'
+
+vid2seq:
+ enable: True
+ clip_path: '/mnt/petrelfs/wangyiqin/vid_cap/examples/ViT-L-14.pt'
+ output_path: '/mnt/petrelfs/wangyiqin/vid_cap/examples/'
+ work_dir: 'vid2seq_workdir'
+ config_path: '/mnt/petrelfs/wangyiqin/vid_cap/scenic/scenic/projects/vid2seq/configs/youcook2.py'
diff --git a/config/infer.yaml b/config/infer.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..893a35a82f582e4fc34cf823a558dbca9cc0d70e
--- /dev/null
+++ b/config/infer.yaml
@@ -0,0 +1,16 @@
+device: 'cuda'
+
+vicuna:
+ model_path: '/home/user/app/vicuna-7b'
+ device: 'cuda'
+ num_gpus: 'auto'
+ max_gpu_memory: '24Gib'
+ load_8bit: True
+ conv_template:
+ temperature: 1.0
+ max_new_tokens: 512
+ debug: False
+ output_path: '/home/user/app/vicuna_out.json'
+
+vid2seq:
+ enable: False
diff --git a/config/local_infer.yaml b/config/local_infer.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..31c478eecae2285a54e21a4cb09cc7d4be1f9b3e
--- /dev/null
+++ b/config/local_infer.yaml
@@ -0,0 +1,21 @@
+device: 'cuda'
+
+vicuna:
+ model_path: '/mnt/petrelfs/wangyiqin/vid_cap/ChatVID/vicuna-7b'
+ device: 'cuda'
+ num_gpus: 1
+ max_gpu_memory: '24Gib'
+ load_8bit: True
+ conv_template:
+ temperature: 1.0
+ max_new_tokens: 512
+ debug: False
+ output_path: '/mnt/petrelfs/wangyiqin/vid_cap/ChatVID/examples/vicuna_out.json'
+
+vid2seq:
+ enable: True
+ clip_path: '/mnt/petrelfs/wangyiqin/vid_cap/ChatVID/clip_ckpt/ViT-L-14.pt'
+ output_path: '/mnt/petrelfs/wangyiqin/vid_cap/ChatVID/examples/'
+ work_dir: 'vid2seq_workdir'
+ config_path: 'config/vid2seq_config.py'
+ checkpoint_path: '/mnt/petrelfs/wangyiqin/vid_cap/ChatVID/vid2seq_ckpt' #only folder name
diff --git a/config/vid2seq_config.py b/config/vid2seq_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..885f8ad2b08d3d78422e96e1ea2b9ac5d3baa57f
--- /dev/null
+++ b/config/vid2seq_config.py
@@ -0,0 +1,183 @@
+import ml_collections
+
+YOUCOOK_TRAIN_SIZE = 1333 # Number of videos
+
+
+def get_config(runlocal=''):
+ """Returns the base experiment configuration."""
+
+ runlocal = bool(runlocal)
+
+ config = ml_collections.ConfigDict()
+ config.token_loss_coef = 1.
+ config.runlocal = runlocal
+ config.experiment_name = 'youcook'
+
+ config.count_flops = False # if runlocal else ml_collections.ConfigDict({'count_flops': True})
+
+ # dataset
+ config.dataset_name = 'dense_video_captioning'
+ config.dataset_configs = ml_collections.ConfigDict()
+ config.dataset_configs.corrupt = 0.
+ config.dataset_configs.span_len = 3.
+ config.dataset_configs.preserve = True
+ config.dataset_configs.corrupt_coef = 0.
+ config.dataset_configs.proba_corrupt = 0.
+ notime = ml_collections.config_dict.FieldReference(False)
+ config.dataset_configs.notime = notime
+ config.dataset_configs.abs_time_token = False
+ config.dataset_configs.random_temporal_crop_proba = 0.5
+ config.dataset_configs.time_format = 'se'
+ tmp_only = ml_collections.config_dict.FieldReference(False)
+ config.dataset_configs.tmp_only = tmp_only
+ config.dataset_configs.split = False
+ order = ml_collections.config_dict.FieldReference('ld')
+ config.dataset_configs.order = order
+ config.dataset_configs.from_xm = None
+
+ config.data_dtype_str = 'float32'
+
+ config.dataset_configs.base_dir = '/mnt/petrelfs/wangyiqin/vid_cap/examples'
+ config.dataset_configs.tables = {
+ 'train': 'train.tfrecord.sst@64',
+ 'validation': 'test@1',
+ }
+ config.dataset_configs.examples_per_subset = {
+ 'train': 0,
+ 'validation': 1,
+ }
+
+ # List of modalities to load, supports `features` only for now.
+ # Note that it only specifies which modalities to load, not which to use,
+ # which is controlled by config.model.modality_fusion
+ config.dataset_configs.modalities = ('features', 'text')
+ config.dataset_configs.features_dim = 768
+ config.dataset_configs.return_as_dict = True
+ num_frames = ml_collections.config_dict.FieldReference(
+ 100) # need to change back to 100 in the future -- Yiqin
+ config.dataset_configs.num_frames = num_frames
+ num_bins = ml_collections.config_dict.FieldReference(100)
+ config.dataset_configs.num_bins = num_bins
+ config.dataset_configs.one_hot_labels = True
+ config.dataset_configs.zero_centering = True
+ config.dataset_configs.val_on_test = False
+ config.dataset_configs.num_eval_clips = 1
+ config.dataset_configs.prefetch_to_device = 2
+
+ # Text params
+ config.dataset_configs.max_num_output_words = 256
+ config.dataset_configs.max_num_input_words = 1000
+ config.dataset_configs.tokenizer = ml_collections.ConfigDict()
+ config.dataset_configs.tokenizer.tokenizer_type = 'sentence_piece'
+ config.dataset_configs.caption_string = 'caption/string'
+ config.dataset_configs.train_caption_string = 'caption/string'
+ config.dataset_configs.input_timestamp_name = 'video/timestamps'
+ config.dataset_configs.input_duration_name = 'video/duration'
+ config.dataset_configs.output_raw_timestamp_name = 'timestamp'
+ config.dataset_configs.output_raw_duration_name = 'duration'
+ config.dataset_configs.input_feature_name = 'image/clip_embeddings'
+ config.dataset_configs.output_raw_feature_name = 'features'
+ config.dataset_configs.vocabulary_size = 32128
+ config.dataset_configs.max_events = 20
+ config.dataset_configs.asr_notime = False
+ config.datasets = {'youcook': config.dataset_configs}
+
+ # Decoding
+ config.decoding = ml_collections.ConfigDict()
+ config.decoding.decoding_method = 'beamsearch'
+ # config.decoding.decoding_method = 'temperature_sample'
+ config.decoding.num_decodes = 4
+ config.decoding.alpha = 1
+ config.decoding.temperature = 1.
+
+ # Model
+ config.model_name = 'vid2seq'
+ config.model = ml_collections.ConfigDict()
+ config.model.from_xm = None
+
+ # Encoder configs
+ config.model.encoder = ml_collections.ConfigDict()
+ config.model.encoder.share_encoder = True
+ config.model.encoder.encoder_type = 'cat_encoder'
+ config.model.encoder.cat_encoder = ml_collections.ConfigDict()
+ config.model.encoder.cat_encoder.dim = 2048
+ config.model.encoder.cat_encoder.layers = 12
+ config.model.encoder.cat_encoder.heads = 12
+ config.model.encoder.cat_encoder.pos_embed = 'learned_1d'
+ config.model.encoder.cat_encoder.dropout_rate = 0.
+ config.model.encoder.cat_encoder.t5_dropout_rate = 0.1
+ config.model.encoder.cat_encoder.stochastic_depth = 0.
+ config.model.encoder.cat_encoder.pretrained_config = 't5_1_1_base'
+ config.model.encoder.from_xm = None
+
+ # Decoder configs
+ config.model.decoder_type = 't5_decoder'
+ config.model.decoder = ml_collections.ConfigDict()
+ config.model.decoder.order = order
+ config.model.decoder.t5_decoder = ml_collections.ConfigDict()
+ config.model.decoder.t5_decoder.logits_via_embedding = False
+ config.model.decoder.t5_decoder.dropout_rate = 0.1
+ config.model.decoder.t5_decoder.num_frames = num_frames
+ config.model.decoder.notime = notime
+ config.model.decoder.num_bins = num_bins
+ config.model.decoder.tmp_only = tmp_only
+ config.model.decoder.t5_decoder.pretrained_config = 't5_1_1_base'
+
+ # Initalisation configs
+ config.init_from = ml_collections.ConfigDict()
+ # Replace with your checkpoint pretrained on YT-temporal-1bn, assuming it has
+ # been trained for 200K iterations
+ config.init_from.checkpoint_path = '/mnt/petrelfs/wangyiqin/vid_cap/vid2seq_model'
+ # config.init_from.model_config = '/mnt/petrelfs/wangyiqin/vid_cap/scenic/scenic/projects/vid2seq/configs/yttemporal.py'
+ config.init_from.step = 200001 # ytt 200000, anet 200001
+
+ config.init_from.encoder = ml_collections.ConfigDict()
+ config.init_from.encoder.checkpoint_path = None
+ config.init_from.encoder.init_from_vit = False
+ config.init_from.encoder = ml_collections.ConfigDict()
+ config.init_from.encoder.load_pretrained_weights = True
+
+ config.init_from.decoder = ml_collections.ConfigDict()
+ config.init_from.decoder.load_pretrained_weights = True
+
+ config.init_from.t5 = ml_collections.ConfigDict()
+ config.init_from.t5.load_pretrained_weights = True
+
+ # Training
+ config.trainer_name = 'densevidcap_trainer'
+ config.optimizer = 'adam'
+ config.optimizer_configs = ml_collections.ConfigDict()
+ config.optimizer_configs.weight_decay = 0.
+ config.l2_decay_factor = 0.
+ config.max_grad_norm = 1.
+ config.label_smoothing = 0.1
+ epochs = ml_collections.config_dict.FieldReference(0) ### add
+ config.num_training_epochs = 0
+ batch_size = ml_collections.config_dict.FieldReference(1)
+ config.batch_size = 1 #if runlocal else batch_size # 128 # Minimum is num_devices = 32
+ config.eval_batch_size = 1 #if runlocal else 32 # Needs to be num_local_devices
+ config.rng_seed = 0
+
+ # Learning schedule.
+ steps_per_epoch = 3 if runlocal else YOUCOOK_TRAIN_SIZE // batch_size
+ total_steps = epochs * steps_per_epoch
+ config.lr_configs = ml_collections.ConfigDict()
+ config.lr_configs.learning_rate_schedule = 'compound'
+ config.lr_configs.factors = 'constant * cosine_decay * linear_warmup'
+ config.lr_configs.warmup_steps = total_steps // 10
+ config.lr_configs.steps_per_cycle = total_steps
+ config.lr_configs.total_steps = total_steps
+ config.lr_configs.base_learning_rate = 3e-4
+
+ config.eval_metrics = ['cider', 'meteor', 'soda']
+
+ # Logging
+ config.log_eval_steps = steps_per_epoch # write TB and/or XM summary
+ config.log_summary_steps = steps_per_epoch # write TB and/or XM summary
+ config.write_summary = True # write TB and/or XM summary
+ config.write_xm_measurements = True # write XM measurements
+ config.xprof = True # Profile using xprof
+ config.checkpoint = True # do checkpointing
+ config.debug_train = False # debug mode during training
+ config.debug_eval = True # debug mode during eval
+ return config
diff --git a/config/yttemporal.py b/config/yttemporal.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e291c18ab8c1b3a6bd3adcbe1a92013ea871783
--- /dev/null
+++ b/config/yttemporal.py
@@ -0,0 +1,184 @@
+
+import ml_collections
+
+
+def get_config(runlocal=''):
+ """Returns the base experiment configuration."""
+
+ runlocal = bool(runlocal)
+
+ config = ml_collections.ConfigDict()
+ config.token_loss_coef = 1.
+ config.runlocal = runlocal
+ config.experiment_name = 'ytt'
+
+ config.count_flops = False if runlocal else ml_collections.ConfigDict(
+ {'count_flops': True})
+
+ # dataset
+ config.dataset_name = 'dense_video_captioning'
+ config.dataset_configs = ml_collections.ConfigDict()
+ config.dataset_configs.corrupt = 0.25
+ config.dataset_configs.span_len = 5.
+ config.dataset_configs.proba_corrupt = 1.
+ config.dataset_configs.corrupt_coef = 1.
+ config.dataset_configs.preserve = False
+ notime = ml_collections.config_dict.FieldReference(False)
+ config.dataset_configs.notime = notime
+ config.dataset_configs.abs_time_token = False
+ config.dataset_configs.random_temporal_crop_proba = 1.
+ config.dataset_configs.time_format = 'se'
+ tmp_only = ml_collections.config_dict.FieldReference(False)
+ config.dataset_configs.tmp_only = tmp_only
+ config.dataset_configs.split = not runlocal
+ order = ml_collections.config_dict.FieldReference('ld')
+ config.dataset_configs.order = order
+ config.dataset_configs.from_xm = None
+
+ config.data_dtype_str = 'float32'
+
+ config.dataset_configs.base_dir = '/'
+ config.dataset_configs.base_dir = '/path/to/yttemporal'
+ config.dataset_configs.tables = {
+ 'train': 'train.tfrecord.sst@1024',
+ }
+ config.dataset_configs.examples_per_subset = {
+ 'train': 14780275,
+ }
+
+ # List of modalities to load, supports `features` only for now.
+ # Note that it only specifies which modalities to load, not which to use,
+ # which is controlled by config.model.modality_fusion
+ config.dataset_configs.modalities = ('features', 'text')
+ config.dataset_configs.features_dim = 768
+ config.dataset_configs.return_as_dict = True
+ num_frames = ml_collections.config_dict.FieldReference(100)
+ config.dataset_configs.num_frames = num_frames
+ num_bins = ml_collections.config_dict.FieldReference(100)
+ config.dataset_configs.num_bins = num_bins
+ config.dataset_configs.one_hot_labels = True
+ config.dataset_configs.zero_centering = True
+ config.dataset_configs.val_on_test = False
+ config.dataset_configs.num_eval_clips = 1
+ config.dataset_configs.prefetch_to_device = 2
+
+ # Text params
+ config.dataset_configs.max_num_output_words = 1000
+ config.dataset_configs.max_num_input_words = 1000
+ config.dataset_configs.tokenizer = ml_collections.ConfigDict()
+ config.dataset_configs.tokenizer.tokenizer_type = 'sentence_piece'
+ config.dataset_configs.caption_string = 'ASR/segment/label/string'
+ config.dataset_configs.train_caption_string = 'ASR/segment/label/string'
+ config.dataset_configs.input_timestamp_start_name = 'ASR/segment/start/timestamp'
+ config.dataset_configs.input_timestamp_end_name = 'ASR/segment/end/timestamp'
+ config.dataset_configs.input_duration_name = 'video/duration'
+ config.dataset_configs.output_raw_timestamp_name = 'timestamp'
+ config.dataset_configs.output_raw_duration_name = 'duration'
+ config.dataset_configs.input_feature_name = 'image/clip_embeddings'
+ config.dataset_configs.output_raw_feature_name = 'features'
+ config.dataset_configs.vocabulary_size = 32128
+ config.dataset_configs.max_events = 1100
+ config.dataset_configs.max_segments = 0
+ config.datasets = {'ytt': config.dataset_configs}
+
+ # Decoding
+ config.decoding = ml_collections.ConfigDict()
+ config.decoding.decoding_method = 'beamsearch'
+ config.decoding.num_decodes = 4
+ config.decoding.alpha = 0.6
+ config.decoding.temperature = 1.
+
+ # Model
+ config.model_name = 'vid2seq'
+ config.model = ml_collections.ConfigDict()
+ config.model.from_xm = None
+
+ # Encoder configs
+ config.model.encoder = ml_collections.ConfigDict()
+ config.model.encoder.share_encoder = True
+ config.model.encoder.encoder_type = 'cat_encoder'
+ config.model.encoder.cat_encoder = ml_collections.ConfigDict()
+ config.model.encoder.cat_encoder.dim = 2048
+ config.model.encoder.cat_encoder.layers = 12
+ config.model.encoder.cat_encoder.heads = 12
+ config.model.encoder.cat_encoder.pos_embed = 'learned_1d'
+ config.model.encoder.cat_encoder.dropout_rate = 0.1
+ config.model.encoder.cat_encoder.t5_dropout_rate = 0.1
+ config.model.encoder.cat_encoder.stochastic_depth = 0.
+ config.model.encoder.cat_encoder.pretrained_config = 't5_1_1_base'
+ config.model.encoder.from_xm = None
+
+ # Decoder configs
+ config.model.decoder_type = 't5_decoder'
+ config.model.decoder = ml_collections.ConfigDict()
+ config.model.decoder.order = order
+ config.model.decoder.t5_decoder = ml_collections.ConfigDict()
+ config.model.decoder.t5_decoder.logits_via_embedding = False
+ config.model.decoder.t5_decoder.dropout_rate = 0.1
+ config.model.decoder.t5_decoder.num_frames = num_frames
+ config.model.decoder.notime = notime
+ config.model.decoder.num_bins = num_bins
+ config.model.decoder.tmp_only = tmp_only
+ # Obtained from scenic/projects/t5/model.py.
+ config.model.decoder.t5_decoder.pretrained_config = 't5_1_1_base'
+
+ config.model.tmp_decoder_type = 't5_decoder'
+ config.model.tmp_decoder = ml_collections.ConfigDict()
+ config.model.tmp_decoder.t5_decoder = ml_collections.ConfigDict()
+ config.model.tmp_decoder.t5_decoder.logits_via_embedding = False
+ config.model.tmp_decoder.t5_decoder.dropout_rate = 0.
+ config.model.tmp_decoder.t5_decoder.pretrained_config = 't5_1_1_base'
+ config.model.decoder.t5_decoder.local = 5
+
+ # Initalisation configs
+ config.init_from = ml_collections.ConfigDict()
+ config.init_from.step = None
+ config.init_from.xm = None
+
+ config.init_from.encoder = ml_collections.ConfigDict()
+ config.init_from.encoder.checkpoint_path = None
+ config.init_from.encoder.init_from_vit = False
+ config.init_from.encoder = ml_collections.ConfigDict()
+ config.init_from.encoder.load_pretrained_weights = True
+
+ config.init_from.decoder = ml_collections.ConfigDict()
+ config.init_from.decoder.load_pretrained_weights = True
+
+ config.init_from.t5 = ml_collections.ConfigDict()
+ config.init_from.t5.load_pretrained_weights = True
+
+ # Training
+ config.trainer_name = 'densevidcap_trainer'
+ config.optimizer = 'adam'
+ config.optimizer_configs = ml_collections.ConfigDict()
+ config.optimizer_configs.weight_decay = 0.
+ config.l2_decay_factor = 0.
+ config.max_grad_norm = 0.1
+ config.label_smoothing = 0.1
+ epochs = ml_collections.config_dict.FieldReference(10)
+ config.num_training_epochs = 0
+ batch_size = ml_collections.config_dict.FieldReference(512)
+ config.batch_size = 1 if runlocal else batch_size # 128 # Minimum is num_devices = 32
+ config.eval_batch_size = 1 if runlocal else 128 # Needs to be num_local_devices
+ config.rng_seed = 0
+
+ # Learning schedule.
+ config.lr_configs = ml_collections.ConfigDict()
+ config.lr_configs.learning_rate_schedule = 'compound'
+ config.lr_configs.factors = 'constant * linear_warmup'
+ config.lr_configs.warmup_steps = 1000
+ config.lr_configs.base_learning_rate = 1e-4
+
+ config.eval_metrics = ['cider', 'meteor', 'soda']
+
+ # Logging
+ config.log_summary_steps = 500 # write TB and/or XM summary
+ config.checkpoint_steps = 5000
+ config.log_eval_steps = 5000
+ config.write_summary = True # write TB and/or XM summary
+ config.write_xm_measurements = True # write XM measurements
+ config.xprof = True # Profile using xprof
+ config.checkpoint = True # do checkpointing
+ config.debug_train = False # debug mode during training
+ config.debug_eval = False # debug mode during eval
+ return config
diff --git a/model/Captioner.py b/model/Captioner.py
new file mode 100644
index 0000000000000000000000000000000000000000..e443612b9eaf64ffbd0354722c93d94d609c2b2b
--- /dev/null
+++ b/model/Captioner.py
@@ -0,0 +1,72 @@
+from mmaction.datasets.transforms import (DecordInit, SampleFrames, Resize,
+ FormatShape, DecordDecode)
+from model.audio import SpeechRecognizer
+from model.vision import DenseCaptioner, ImageCaptioner
+
+
+class Captioner:
+ """ Captioner class for video captioning
+ """
+
+ def __init__(self, config):
+ """ Initialize the captioner
+ Args:
+ config: configuration file
+ """
+ self.config = config
+ self.image_captioner = ImageCaptioner(device=config['device'])
+ self.dense_captioner = DenseCaptioner(device=config['device'])
+ self.speech_recognizer = SpeechRecognizer(device=config['device'])
+ # if self.config['vid2seq']['enable']:
+ # self.vid2seq_captioner = Vid2SeqCaptioner(config=config['vid2seq'])
+
+ self.src_dir = ''
+
+ def debug_vid2seq(self, video_path, num_frames=8):
+ return self.vid2seq_captioner(video_path=video_path)
+
+ def caption_video(self, video_path, num_frames=8):
+ print("Watching video ...")
+
+ video_info = {'filename': video_path, 'start_index': 0}
+
+ video_processors = [
+ DecordInit(),
+ SampleFrames(clip_len=1, frame_interval=1, num_clips=num_frames),
+ DecordDecode(),
+ Resize(scale=(-1, 720)),
+ FormatShape(input_format='NCHW'),
+ ]
+ for processor in video_processors:
+ video_info = processor.transform(video_info)
+
+ timestamp_list = [
+ round(i / video_info['avg_fps'], 1)
+ for i in video_info['frame_inds']
+ ]
+
+ image_captions = self.image_captioner(imgs=video_info['imgs'])
+ dense_captions = self.dense_captioner(imgs=video_info['imgs'])
+ # if self.config['vid2seq']['enable']:
+ # vid2seq_captions = self.vid2seq_captioner(video_path=video_path)
+ # else:
+ vid2seq_captions = []
+ try:
+ speech = self.speech_recognizer(video_path)
+ except RuntimeError:
+ speech = ""
+
+ overall_captions = ""
+ for i in range(num_frames):
+ overall_captions += "[" + str(timestamp_list[i]) + "s]: "
+ overall_captions += "You see " + image_captions[i]
+ overall_captions += "You find " + dense_captions[i] + "\n"
+
+ if speech != "":
+ overall_captions += "You hear \"" + speech + "\"\n"
+
+ for i in range(len(vid2seq_captions)):
+ overall_captions += "You notice " + vid2seq_captions[i] + "\n"
+ print("Captions generated")
+
+ return overall_captions
diff --git a/model/Vicuna.py b/model/Vicuna.py
new file mode 100644
index 0000000000000000000000000000000000000000..54e0786fe26679b09b71a90aa64cc20a32a519ec
--- /dev/null
+++ b/model/Vicuna.py
@@ -0,0 +1,214 @@
+from model.fastchat.conversation import (Conversation, SeparatorStyle,
+ compute_skip_echo_len,
+ get_default_conv_template)
+from model.fastchat.serve.inference import (ChatIO, chat_loop, generate_stream,
+ load_model)
+
+
+class SimpleChatIO(ChatIO):
+
+ def prompt_for_input(self, role) -> str:
+ return input(f"{role}: ")
+
+ def prompt_for_output(self, role: str):
+ print(f"{role}: ", end="", flush=True)
+
+ def stream_output(self, output_stream, skip_echo_len: int):
+ pre = 0
+ for outputs in output_stream:
+ outputs = outputs[skip_echo_len:].strip()
+ outputs = outputs.split(" ")
+ now = len(outputs) - 1
+ if now > pre:
+ print(" ".join(outputs[pre:now]), end=" ", flush=True)
+ pre = now
+ print(" ".join(outputs[pre:]), flush=True)
+ return " ".join(outputs)
+
+
+class VicunaChatBot:
+
+ def __init__(
+ self,
+ model_path: str,
+ device: str,
+ num_gpus: str,
+ max_gpu_memory: str,
+ load_8bit: bool,
+ conv_template,
+ ChatIO: ChatIO,
+ debug: bool,
+ ):
+ self.model_path = model_path
+ self.device = device
+ self.chatio = ChatIO
+ self.debug = debug
+
+ self.model, self.tokenizer = load_model(self.model_path, device,
+ num_gpus, max_gpu_memory,
+ load_8bit, debug)
+
+ if conv_template:
+ self.conv = conv_template.copy()
+ else:
+ self.conv = get_default_conv_template(model_path).copy()
+
+ self.conv_template = self.conv.copy()
+
+ def chat(self, inp: str, temperature: float, max_new_tokens: int):
+ """ Vicuna as a chatbot. """
+ self.conv.append_message(self.conv.roles[0], inp)
+ self.conv.append_message(self.conv.roles[1], None)
+
+ generate_stream_func = generate_stream
+ prompt = self.conv.get_prompt()
+
+ skip_echo_len = compute_skip_echo_len(self.model_path, self.conv,
+ prompt)
+ stop_str = (
+ self.conv.sep if self.conv.sep_style
+ in [SeparatorStyle.SINGLE, SeparatorStyle.BAIZE] else None)
+ params = {
+ "model": self.model_path,
+ "prompt": prompt,
+ "temperature": temperature,
+ "max_new_tokens": max_new_tokens,
+ "stop": stop_str,
+ }
+ print(prompt)
+ self.chatio.prompt_for_output(self.conv.roles[1])
+ output_stream = generate_stream_func(self.model, self.tokenizer,
+ params, self.device)
+ outputs = self.chatio.stream_output(output_stream, skip_echo_len)
+ # NOTE: strip is important to align with the training data.
+ self.conv.messages[-1][-1] = outputs.strip()
+ return outputs
+
+ def summarise(self, caption: dict, temperature: float,
+ max_new_tokens: int):
+ """ Vicuna as a summariser. """
+ questions = caption
+ captions = {}
+ for id, question in questions.items():
+ # Reset the conversation for each iteration
+ self.conv = get_default_conv_template(self.model_path).copy()
+ self.conv.append_message(self.conv.roles[0], question)
+ self.conv.append_message(self.conv.roles[1], None)
+
+ generate_stream_func = generate_stream
+ prompt = self.conv.get_prompt()
+
+ skip_echo_len = compute_skip_echo_len(self.model_path, self.conv,
+ prompt)
+ stop_str = (
+ self.conv.sep if self.conv.sep_style
+ in [SeparatorStyle.SINGLE, SeparatorStyle.BAIZE] else None)
+
+ params = {
+ "model": self.model_path,
+ "prompt": prompt,
+ "temperature": temperature,
+ "max_new_tokens": max_new_tokens,
+ "stop": stop_str,
+ }
+
+ self.chatio.prompt_for_output(self.conv.roles[1])
+ output_stream = generate_stream_func(self.model, self.tokenizer,
+ params, self.device)
+ outputs = self.chatio.stream_output(output_stream, skip_echo_len)
+ captions[id] = outputs
+
+ if self.debug:
+ print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
+
+ print(captions)
+ return captions
+
+ def clear_conv_(self):
+ """ Clear the conversation. """
+ self.conv = self.conv_template.copy()
+
+ def change_conv_template_(self, conv_template):
+ self.conv_template = conv_template.copy()
+ self.conv = conv_template.copy()
+
+ def change_conv_(self, conv_template):
+ """ Change the conversation. """
+ self.conv = conv_template.copy()
+
+
+class VicunaHandler:
+ """ VicunaHandler is a class that handles the communication between the
+ frontend and the backend. """
+
+ def __init__(self, config):
+ self.config = config
+ self.chat_io = SimpleChatIO()
+ self.chatbot = VicunaChatBot(
+ self.config['model_path'],
+ self.config['device'],
+ self.config['num_gpus'],
+ self.config['max_gpu_memory'],
+ self.config['load_8bit'],
+ None,
+ self.chat_io,
+ self.config['debug'],
+ )
+
+ def chat(self):
+ """ Chat with the Vicuna. """
+ template = self._construct_conversation("")
+ chat_loop(
+ self.config['model_path'],
+ self.config['device'],
+ self.config['num_gpus'],
+ self.config['max_gpu_memory'],
+ self.config['load_8bit'],
+ template,
+ self.config['temperature'],
+ self.config['max_new_tokens'],
+ self.chat_io,
+ self.config['debug'],
+ )
+
+ def gr_chatbot_init(self, caption: str):
+ """ Initialise the chatbot for gradio. """
+
+ template = self._construct_conversation(caption)
+ self.chatbot.change_conv_template_(template)
+ print("Chatbot initialised.")
+
+ def gr_chat(self, inp):
+ """ Chat using gradio as the frontend. """
+ return self.chatbot.chat(inp, self.config['temperature'],
+ self.config['max_new_tokens'])
+
+ def _construct_conversation(self, prompt):
+ """ Construct a conversation template.
+ Args:
+ prompt: the prompt for the conversation.
+ """
+
+ user_message = "The following text described what you have " +\
+ "seen, found, heard and notice from a consecutive video." +\
+ " Some of the texts may not be accurate. " +\
+ "Try to conclude what happens in the video, " +\
+ "then answer my question based on your conclusion.\n" +\
+ "\n" + prompt + "\n" +\
+ "Example: Is this a Video?"
+
+ user_message = user_message.strip()
+
+ print(user_message)
+
+ return Conversation(
+ system=
+ "A chat between a curious user and an artificial intelligence assistant answering quetions on videos."
+ "The assistant answers the questions based on the given video captions and speech in time order.",
+ roles=("USER", "ASSISTANT"),
+ messages=(("USER", user_message), ("ASSISTANT", "yes")),
+ offset=0,
+ sep_style=SeparatorStyle.TWO,
+ sep=" ",
+ sep2="",
+ )
diff --git a/model/__init__.py b/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b8aa93b02dfe7a3d23d2d59001aa476a017aad4
--- /dev/null
+++ b/model/__init__.py
@@ -0,0 +1,2 @@
+from .Captioner import Captioner
+from .Vicuna import VicunaHandler
\ No newline at end of file
diff --git a/model/audio/SpeechRecognizer.py b/model/audio/SpeechRecognizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..af7f90224a0e82fe2ae38caf5186cb8c2daf9831
--- /dev/null
+++ b/model/audio/SpeechRecognizer.py
@@ -0,0 +1,11 @@
+import whisper
+
+
+class SpeechRecognizer:
+
+ def __init__(self, device='cuda'):
+ self.model = whisper.load_model('base').to(device)
+
+ def __call__(self, video_path):
+ generated_text = self.model.transcribe(video_path)['text']
+ return generated_text
diff --git a/model/audio/__init__.py b/model/audio/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5132c2eb58e18ad6280cabe58a936116843d3491
--- /dev/null
+++ b/model/audio/__init__.py
@@ -0,0 +1 @@
+from .SpeechRecognizer import SpeechRecognizer
\ No newline at end of file
diff --git a/model/fastchat/__init__.py b/model/fastchat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d31c31eaeb0ef12d7e45a4f738d5029b6d95d135
--- /dev/null
+++ b/model/fastchat/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.2.3"
diff --git a/model/fastchat/client/__init__.py b/model/fastchat/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff1f3f146bb9eee8644c0223aca34506a0b714fa
--- /dev/null
+++ b/model/fastchat/client/__init__.py
@@ -0,0 +1,3 @@
+from fastchat.client.api import ChatCompletion, set_baseurl
+
+__all__ = ["ChatCompletion", "set_baseurl"]
diff --git a/model/fastchat/client/api.py b/model/fastchat/client/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e1eb7734350b700bfeeeb1edadb7ba1998f330a
--- /dev/null
+++ b/model/fastchat/client/api.py
@@ -0,0 +1,72 @@
+from typing import Dict, List, Optional
+import asyncio
+import os
+
+import httpx
+from fastchat.protocol.chat_completion import (
+ ChatCompletionRequest,
+ ChatCompletionResponse,
+)
+
+_BASE_URL = "http://localhost:8000"
+
+if os.environ.get("FASTCHAT_BASE_URL"):
+ _BASE_URL = os.environ.get("FASTCHAT_BASE_URL")
+
+
+def set_baseurl(base_url: str):
+ global _BASE_URL
+ _BASE_URL = base_url
+
+
+class ChatCompletionClient:
+ def __init__(self, base_url: str):
+ self.base_url = base_url
+
+ async def request_completion(
+ self, request: ChatCompletionRequest, timeout: Optional[float] = None
+ ) -> ChatCompletionResponse:
+ async with httpx.AsyncClient() as client:
+ response = await client.post(
+ f"{self.base_url}/v1/chat/completions",
+ json=request.dict(),
+ timeout=timeout,
+ )
+ response.raise_for_status()
+ return ChatCompletionResponse.parse_obj(response.json())
+
+
+class ChatCompletion:
+ OBJECT_NAME = "chat.completions"
+
+ @classmethod
+ def create(cls, *args, **kwargs) -> ChatCompletionResponse:
+ """Creates a new chat completion for the provided messages and parameters.
+
+ See `acreate` for more details.
+ """
+ return asyncio.run(cls.acreate(*args, **kwargs))
+
+ @classmethod
+ async def acreate(
+ cls,
+ model: str,
+ messages: List[Dict[str, str]],
+ temperature: Optional[float] = 0.7,
+ n: int = 1,
+ max_tokens: Optional[int] = None,
+ stop: Optional[str] = None,
+ timeout: Optional[float] = None,
+ ) -> ChatCompletionResponse:
+ """Creates a new chat completion for the provided messages and parameters."""
+ request = ChatCompletionRequest(
+ model=model,
+ messages=messages,
+ temperature=temperature,
+ n=n,
+ max_tokens=max_tokens,
+ stop=stop,
+ )
+ client = ChatCompletionClient(_BASE_URL)
+ response = await client.request_completion(request, timeout=timeout)
+ return response
diff --git a/model/fastchat/client/test_client.py b/model/fastchat/client/test_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..a04197532d40d663bec029589dfce08d717f3048
--- /dev/null
+++ b/model/fastchat/client/test_client.py
@@ -0,0 +1,17 @@
+from fastchat import client
+
+completion = client.ChatCompletion.create(
+ model="vicuna-7b-v1.1",
+ messages=[
+ {"role": "user", "content": "Hello!"},
+ {"role": "assistant", "content": "Hello! How can I help you today?"},
+ {"role": "user", "content": "What's your favorite food?"},
+ {
+ "role": "assistant",
+ "content": "As an AI language model, I don't have personal preferences or emotions. However, I can provide information about food. If you have any specific cuisine or dish in mind, I can tell you more about it.",
+ },
+ {"role": "user", "content": "What's your recommendation?"},
+ ],
+)
+
+print(completion.choices[0].message)
diff --git a/model/fastchat/constants.py b/model/fastchat/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..70294c04e7086d3bd486ecb7675c263cef5b9d07
--- /dev/null
+++ b/model/fastchat/constants.py
@@ -0,0 +1,4 @@
+CONTROLLER_HEART_BEAT_EXPIRATION = 90
+WORKER_HEART_BEAT_INTERVAL = 30
+
+LOGDIR = "."
diff --git a/model/fastchat/conversation.py b/model/fastchat/conversation.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d5555dfe30df5c0193ffd7edf0a0e03f51b78ed
--- /dev/null
+++ b/model/fastchat/conversation.py
@@ -0,0 +1,289 @@
+"""
+Conversation prompt template.
+
+Now we support
+- Vicuna
+- Koala
+- OpenAssistant/oasst-sft-1-pythia-12b
+- StabilityAI/stablelm-tuned-alpha-7b
+- databricks/dolly-v2-12b
+- THUDM/chatglm-6b
+- project-baize/baize-lora-7B
+- Alpaca/LLaMa
+"""
+
+import dataclasses
+from enum import auto, Enum
+from typing import List, Tuple, Any
+
+
+class SeparatorStyle(Enum):
+ """Different separator style."""
+
+ SINGLE = auto()
+ TWO = auto()
+ DOLLY = auto()
+ OASST_PYTHIA = auto()
+ BAIZE = auto()
+
+
+@dataclasses.dataclass
+class Conversation:
+ """A class that keeps all conversation history."""
+
+ system: str
+ roles: List[str]
+ messages: List[List[str]]
+ offset: int
+ sep_style: SeparatorStyle = SeparatorStyle.SINGLE
+ sep: str = "###"
+ sep2: str = None
+
+ # Used for gradio server
+ skip_next: bool = False
+ conv_id: Any = None
+
+ def get_prompt(self):
+ if self.sep_style == SeparatorStyle.SINGLE:
+ ret = self.system
+ for role, message in self.messages:
+ if message:
+ ret += self.sep + " " + role + ": " + message
+ else:
+ ret += self.sep + " " + role + ":"
+ return ret
+ elif self.sep_style == SeparatorStyle.TWO:
+ seps = [self.sep, self.sep2]
+ ret = self.system + seps[0]
+ for i, (role, message) in enumerate(self.messages):
+ if message:
+ ret += role + ": " + message + seps[i % 2]
+ else:
+ ret += role + ":"
+ return ret
+ elif self.sep_style == SeparatorStyle.DOLLY:
+ seps = [self.sep, self.sep2]
+ ret = self.system
+ for i, (role, message) in enumerate(self.messages):
+ if message:
+ ret += role + ":\n" + message + seps[i % 2]
+ if i % 2 == 1:
+ ret += "\n\n"
+ else:
+ ret += role + ":\n"
+ return ret
+ elif self.sep_style == SeparatorStyle.OASST_PYTHIA:
+ ret = self.system
+ for role, message in self.messages:
+ if message:
+ ret += role + message + self.sep
+ else:
+ ret += role
+ return ret
+ elif self.sep_style == SeparatorStyle.BAIZE:
+ ret = self.system
+ for role, message in self.messages:
+ if message:
+ ret += "\n" + role + message
+ else:
+ ret += "\n" + role
+ return ret
+ else:
+ raise ValueError(f"Invalid style: {self.sep_style}")
+
+ def append_message(self, role, message):
+ self.messages.append([role, message])
+
+ def to_gradio_chatbot(self):
+ ret = []
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
+ if i % 2 == 0:
+ ret.append([msg, None])
+ else:
+ ret[-1][-1] = msg
+ return ret
+
+ def copy(self):
+ return Conversation(
+ system=self.system,
+ roles=self.roles,
+ messages=[[x, y] for x, y in self.messages],
+ offset=self.offset,
+ sep_style=self.sep_style,
+ sep=self.sep,
+ sep2=self.sep2,
+ conv_id=self.conv_id,
+ )
+
+ def dict(self):
+ return {
+ "system": self.system,
+ "roles": self.roles,
+ "messages": self.messages,
+ "offset": self.offset,
+ "sep": self.sep,
+ "sep2": self.sep2,
+ "conv_id": self.conv_id,
+ }
+
+
+conv_one_shot = Conversation(
+ system="A chat between a curious human and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+ roles=("Human", "Assistant"),
+ messages=(
+ (
+ "Human",
+ "What are the key differences between renewable and non-renewable energy sources?",
+ ),
+ (
+ "Assistant",
+ "Renewable energy sources are those that can be replenished naturally in a relatively "
+ "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
+ "Non-renewable energy sources, on the other hand, are finite and will eventually be "
+ "depleted, such as coal, oil, and natural gas. Here are some key differences between "
+ "renewable and non-renewable energy sources:\n"
+ "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
+ "energy sources are finite and will eventually run out.\n"
+ "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
+ "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
+ "and other negative effects.\n"
+ "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
+ "have lower operational costs than non-renewable sources.\n"
+ "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
+ "locations than non-renewable sources.\n"
+ "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
+ "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
+ "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
+ "non-renewable sources are not, and their depletion can lead to economic and social instability.",
+ ),
+ ),
+ offset=2,
+ sep_style=SeparatorStyle.SINGLE,
+ sep="###",
+)
+
+
+conv_vicuna_v1_1 = Conversation(
+ system="A chat between a curious user and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the user's questions.",
+ roles=("USER", "ASSISTANT"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.TWO,
+ sep=" ",
+ sep2="",
+)
+
+
+conv_koala_v1 = Conversation(
+ system="BEGINNING OF CONVERSATION:",
+ roles=("USER", "GPT"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.TWO,
+ sep=" ",
+ sep2="",
+)
+
+conv_dolly = Conversation(
+ system="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
+ roles=("### Instruction", "### Response"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.DOLLY,
+ sep="\n\n",
+ sep2="### End",
+)
+
+conv_oasst = Conversation(
+ system="",
+ roles=("<|prompter|>", "<|assistant|>"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.OASST_PYTHIA,
+ sep="<|endoftext|>",
+)
+
+conv_stablelm = Conversation(
+ system="""<|SYSTEM|># StableLM Tuned (Alpha version)
+- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
+- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
+- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
+- StableLM will refuse to participate in anything that could harm a human.
+""",
+ roles=("<|USER|>", "<|ASSISTANT|>"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.OASST_PYTHIA,
+ sep="",
+)
+
+conv_baize = Conversation(
+ system="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.",
+ roles=("[|Human|]", "[|AI|]"),
+ messages=(
+ ("[|Human|]", "Hello!"),
+ ("[|AI|]", "Hi!"),
+ ),
+ offset=2,
+ sep_style=SeparatorStyle.BAIZE,
+ sep="[|Human|]",
+)
+
+
+conv_templates = {
+ "conv_one_shot": conv_one_shot,
+ "vicuna_v1.1": conv_vicuna_v1_1,
+ "koala_v1": conv_koala_v1,
+ "dolly": conv_dolly,
+ "oasst": conv_oasst,
+ "baize": conv_baize,
+}
+
+
+def get_default_conv_template(model_name):
+ model_name = model_name.lower()
+ if "vicuna" in model_name or "output" in model_name:
+ return conv_vicuna_v1_1
+ elif "koala" in model_name:
+ return conv_koala_v1
+ elif "dolly-v2" in model_name:
+ return conv_dolly
+ elif "oasst" in model_name and "pythia" in model_name:
+ return conv_oasst
+ elif "baize" in model_name:
+ return conv_baize
+ elif "stablelm" in model_name:
+ return conv_stablelm
+ return conv_one_shot
+
+
+def compute_skip_echo_len(model_name, conv, prompt):
+ model_name = model_name.lower()
+ if "chatglm" in model_name:
+ skip_echo_len = len(conv.messages[-2][1]) + 1
+ elif "dolly-v2" in model_name:
+ special_toks = ["### Instruction:", "### Response:", "### End"]
+ skip_echo_len = len(prompt)
+ for tok in special_toks:
+ skip_echo_len -= prompt.count(tok) * len(tok)
+ elif "oasst" in model_name and "pythia" in model_name:
+ special_toks = ["<|prompter|>", "<|assistant|>", "<|endoftext|>"]
+ skip_echo_len = len(prompt)
+ for tok in special_toks:
+ skip_echo_len -= prompt.count(tok) * len(tok)
+ elif "stablelm" in model_name:
+ special_toks = ["<|SYSTEM|>", "<|USER|>", "<|ASSISTANT|>"]
+ skip_echo_len = len(prompt)
+ for tok in special_toks:
+ skip_echo_len -= prompt.count(tok) * len(tok)
+ elif "baize" in model_name:
+ skip_echo_len = len(prompt)
+ else:
+ skip_echo_len = len(prompt) + 1 - prompt.count("") * 3
+ return skip_echo_len
+
+
+if __name__ == "__main__":
+ print(default_conversation.get_prompt())
diff --git a/model/fastchat/data/__init__.py b/model/fastchat/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/model/fastchat/data/alpaca-converter.py b/model/fastchat/data/alpaca-converter.py
new file mode 100644
index 0000000000000000000000000000000000000000..392ed2c2beaae92ce0464aecac6c254ffee53300
--- /dev/null
+++ b/model/fastchat/data/alpaca-converter.py
@@ -0,0 +1,67 @@
+import argparse
+import json
+import pathlib
+
+# Prompt from stanford alpaca's training script
+PROMPT_DICT = {
+ "prompt_input": (
+ "Below is an instruction that describes a task, paired with an input that provides further context. "
+ "Write a response that appropriately completes the request.\n\n"
+ "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"
+ ),
+ "prompt_no_input": (
+ "Below is an instruction that describes a task. "
+ "Write a response that appropriately completes the request.\n\n"
+ "### Instruction:\n{instruction}\n\n### Response:"
+ ),
+}
+
+
+def main(args):
+ data_path = pathlib.Path(args.data_path)
+ with data_path.open() as f:
+ data = json.load(f)
+
+ prompt_input, prompt_no_input = (
+ PROMPT_DICT["prompt_input"],
+ PROMPT_DICT["prompt_no_input"],
+ )
+ sources = [
+ prompt_input.format_map(example)
+ if example.get("input", "") != ""
+ else prompt_no_input.format_map(example)
+ for example in data
+ ]
+ targets = [example["output"] for example in data]
+
+ new_data = []
+ cnt = 1
+ for s, t in zip(sources, targets):
+ new_data.append(
+ {
+ "id": str(cnt),
+ "conversations": [
+ {
+ "from": "human",
+ "value": s,
+ },
+ {
+ "from": "gpt",
+ "value": t,
+ },
+ ],
+ }
+ )
+ cnt += 1
+
+ json.dump(new_data, open(args.output_path, "w"), indent=2)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--data_path", type=str, default="alpaca-data.json")
+ parser.add_argument(
+ "--output_path", type=str, default="alpaca-data-conversation.json"
+ )
+ args = parser.parse_args()
+ main(args)
diff --git a/model/fastchat/data/clean_sharegpt.py b/model/fastchat/data/clean_sharegpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..34743f174bc5c2c40a761baaee42c724d5925f14
--- /dev/null
+++ b/model/fastchat/data/clean_sharegpt.py
@@ -0,0 +1,190 @@
+"""
+- Convert html to markdown with basic data cleaning.
+- Deduplication.
+
+Usage:
+python3 -m fastchat.data.clean_sharegpt --in sharegpt_html.json --out sharegpt_clean.json
+"""
+import argparse
+import json
+import logging
+import re
+from typing import Dict, Union
+
+import bs4
+import markdownify # == 0.11.6
+import tqdm
+
+
+div_pattern = re.compile("")
+span_pattern = re.compile("")
+code_lang_pattern = re.compile(
+ "```\s*" + "(.*?)" + "(?:Copy code)+" + "(.+?)" + "\s*?```", re.DOTALL
+)
+code_lang_format = "```\g<1>\n\g<2>\n```"
+regenerate_pattern = re.compile("\d+ / \d+")
+copy_chars_pattern = re.compile("Copy\d+ chars / \d+ words")
+copy_code_pattern = re.compile("```(.*?)Copy code\s*```")
+
+
+def reformat_code(val: str) -> str:
+ # Input code format is:
+ # ```
+ # $Copy code$
+ #
+ # ```
+ # This function convert it into the correct markdown format
+ return re.sub(code_lang_pattern, code_lang_format, val)
+
+
+def html_to_markdown(val: str) -> str:
+ # Remove all . This is required to make intent work in code blocks.
+ val = re.sub(div_pattern, "", val)
+ # Remove all
. This is required to make underscores work in code blocks.
+ val = re.sub(span_pattern, "", val)
+ # Markdown to html
+ val = markdownify.markdownify(val).strip()
+ # Reformat code
+ val = reformat_code(val)
+
+ # Remove noisy "[number] / [number]" at the beginning
+ noise = re.search(regenerate_pattern, val)
+ if noise and noise.start() == 0:
+ val = val[noise.end() :]
+ # Remove noisy "Copy[number] chars / [number] words"
+ val = re.sub(copy_chars_pattern, "", val)
+ # Remove empty code block ```\nCopy code\n```
+ val = re.sub(copy_code_pattern, "", val)
+
+ # Strip
+ val = val.replace("\n\n\n", "\n").strip()
+
+ if args.debug:
+ print(val)
+ exit()
+
+ return val
+
+
+def should_filter(val: str) -> bool:
+ black_list = ["openai", "chatgpt"]
+ for w in black_list:
+ if w in val.lower():
+ return True
+ return False
+
+
+def clean_html_source(content, begin, end, check_tag, check_num):
+ """
+ Clean the input json content.
+
+ Args:
+ content: json file loaded in memory.
+ check_tag: a debug purpose arg. If a conversation contains the tag, log
+ it before and after cleaning.
+ check_num: number of matched conversations logged.
+ """
+ BARRIER = "\n" + "=" * 20 + "\n"
+ cnt_skip = 0
+ cnt_too_short = 0
+ cnt_id_duplication = 0
+ cnt_value_duplication = 0
+ cnt_filter = 0
+ cnt_tag = 0
+ visited = {}
+
+ content = content[begin:end]
+ new_content = []
+
+ for sample in tqdm.tqdm(content):
+ skipped = False
+ cid = sample["id"]
+
+ if len(sample["conversations"]) <= 1:
+ print(f"id {cid} is too short")
+ cnt_too_short += 1
+ skipped = True
+ elif cid in visited:
+ print(f"id {cid} is an id duplication of {visited[cid]}")
+ cnt_id_duplication += 1
+ skipped = True
+ elif (
+ sample["conversations"][1]["value"],
+ len(sample["conversations"]),
+ ) in visited:
+ key = (sample["conversations"][1]["value"], len(sample["conversations"]))
+ print(f"id {cid} is a value duplication of {visited[key]}")
+ cnt_value_duplication += 1
+ skipped = True
+ else:
+ key = (sample["conversations"][1]["value"], len(sample["conversations"]))
+ visited[cid] = visited[key] = cid
+
+ for c in sample["conversations"]:
+ if should_filter(c["value"]):
+ print(f"id {cid} is filtered out")
+ cnt_filter += 1
+ skipped = True
+ break
+
+ try:
+ new_val = html_to_markdown(c["value"])
+ except (bs4.builder.ParserRejectedMarkup, AssertionError):
+ skipped = True
+ break
+
+ c["value"] = new_val
+
+ # Debug
+ if (
+ check_tag is not None
+ and check_tag in c["value"]
+ and cnt_tag < check_num
+ ):
+ logging.debug(
+ BARRIER
+ + c["value"]
+ + "\n"
+ + BARRIER
+ + new_val
+ + "\n"
+ + BARRIER
+ + "\n"
+ )
+ cnt_tag += 1
+ if cnt_tag == check_num:
+ break
+
+ if not skipped:
+ new_content.append(sample)
+ else:
+ cnt_skip += 1
+
+ print(
+ f"total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, "
+ f"cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, "
+ f"cnt_value_duplication: {cnt_value_duplication}, cnt_filter: {cnt_filter}"
+ )
+
+ return new_content
+
+
+def main(args):
+ content = json.load(open(args["in_file"], "r"))
+ content = clean_html_source(
+ content, args["begin"], args["end"], args["check_tag"], args["check_num"]
+ )
+ json.dump(content, open(args["out_file"], "w"), indent=2)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, default="sharegpt_clean.json")
+ parser.add_argument("--begin", type=int)
+ parser.add_argument("--end", type=int)
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--check-tag", type=str)
+ parser.add_argument("--check-num", type=int, default=1)
+ args = parser.parse_args()
+ main(vars(args))
diff --git a/model/fastchat/data/hardcoded_questions.py b/model/fastchat/data/hardcoded_questions.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bded3948693ad70a91666c447ee716c4ebac240
--- /dev/null
+++ b/model/fastchat/data/hardcoded_questions.py
@@ -0,0 +1,161 @@
+import json
+
+
+def identity_questions():
+ """ "
+ Adopted from https://github.com/young-geng/koala_data_pipeline/blob/main/process_hard_coded_data.py
+ """
+ content = []
+
+ name = "Vicuna"
+ org = "Large Model Systems Organization (LMSYS)"
+
+ def generate_conversations(questions, answers):
+ for q in questions:
+ for a in answers:
+ content.append(
+ {
+ "id": f"identity_{len(content)}",
+ "conversations": [
+ {"from": "human", "value": q},
+ {"from": "gpt", "value": a},
+ ],
+ }
+ )
+
+ questions = [
+ "Who are you?",
+ "What is your name?",
+ "What's your name?",
+ "What are you called?",
+ "What are you?",
+ "Tell me your name.",
+ "Tell me about yourself.",
+ "Tell me about you.",
+ "Tell me who you are.",
+ ]
+ answers = [
+ f"I am {name}, a language model trained by researchers from {org}.",
+ f"My name is {name}, and I'm a language model developed by {org}.",
+ f"You can call me {name}, and I was trained by {org} researchers as a language model.",
+ f"As a language model, I go by the name {name} and was trained by researchers from {org}.",
+ f"I'm a language model called {name}, and I was trained by {org} researchers.",
+ ]
+ generate_conversations(questions, answers)
+
+ questions = [
+ "Who created you?",
+ "Who made you?",
+ "Who built you?",
+ "Who programmed you?",
+ "Who trained you?",
+ "Who taught you?",
+ "Who developed you?",
+ ]
+ answers = [
+ f"Researchers from {org} created me.",
+ f"I'm created by {org}.",
+ f"I'm built by researchers from {org}.",
+ f"I am a language model trained by researchers from {org}.",
+ f"I'm a language model developed by {org}.",
+ f"I'm a language model created by researchers from {org}.",
+ f"My creators are researchers from {org}.",
+ ]
+ generate_conversations(questions, answers)
+
+ questions = [
+ "Are you ChatGPT?",
+ "Are you GPT-2?",
+ "Are you GPT-3?",
+ "Are you GPT-4?",
+ "Are you davinci?",
+ "Are you davinci-001?",
+ "Are you davinci-002?",
+ "Are you davinci-003?",
+ "Are you curie?",
+ "Are you based on ChatGPT?",
+ "Are you based on GPT-2?",
+ "Are you based on GPT-3?",
+ "Are you based on GPT-4?",
+ "Are you based on davinci?",
+ "Are you based on davinci-001?",
+ "Are you based on davinci-002?",
+ "Are you based on davinci-003?",
+ "Are you based on curie?",
+ "Are you trained by OpenAI?",
+ "Are you trained by Google?",
+ "Are you trained by Microsoft?",
+ "Are you trained by Meta?",
+ "Are you trained by IBM?",
+ "Do you call OpenAI APIs?",
+ "Do you call Google APIs?",
+ "Do you call Microsoft APIs?",
+ "Do you call Meta APIs?",
+ "Do you call IBM APIs?",
+ "Are you created by OpenAI?",
+ "Are you created by Google?",
+ "Are you created by Microsoft?",
+ "Are you created by Meta?",
+ "Are you created by IBM?",
+ "Are you developed by OpenAI?",
+ "Are you developed by Google?",
+ "Are you developed by Microsoft?",
+ "Are you developed by Meta?",
+ "Are you developed by IBM?",
+ "Are you trained on OpenAI data?",
+ "Are you trained on Google data?",
+ "Are you trained on Microsoft data?",
+ "Are you trained on Meta data?",
+ "Are you trained on IBM data?",
+ "Are you trained with OpenAI data?",
+ "Are you trained with Google data?",
+ "Are you trained with Microsoft data?",
+ "Are you trained with Meta data?",
+ "Are you trained with IBM data?",
+ "Have you been trained with OpenAI data?",
+ "Have you been trained with Google data?",
+ "Have you been trained with Microsoft data?",
+ "Have you been trained with Meta data?",
+ "Have you been trained with IBM data?",
+ "Are you finetuned on OpenAI data?",
+ "Are you finetuned on Google data?",
+ "Are you finetuned on Microsoft data?",
+ "Are you finetuned on Meta data?",
+ "Are you finetuned on IBM data?",
+ "Are you finetuned with OpenAI data?",
+ "Are you finetuned with Google data?",
+ "Are you finetuned with Microsoft data?",
+ "Are you finetuned with Meta data?",
+ "Are you finetuned with IBM data?",
+ "Have you been finetuned with OpenAI data?",
+ "Have you been finetuned with Google data?",
+ "Have you been finetuned with Microsoft data?",
+ "Have you been finetuned with Meta data?",
+ "Have you been finetuned with IBM data?",
+ ]
+ answers = [
+ f"No, I am a language model trained by researchers from {org}.",
+ f"No, I am a language model developed by researchers from {org}.",
+ f"No, I am a language model created by researchers from {org}.",
+ f"No, I am trained by researchers from {org}.",
+ f"No, I am developed by researchers from {org}.",
+ f"No, I am created by researchers from {org}.",
+ f"No, I'm a language model trained by researchers from {org}.",
+ f"No, I'm a language model developed by researchers from {org}.",
+ f"No, I'm a language model created by researchers from {org}.",
+ f"No, I'm trained by researchers from {org}.",
+ f"No, I'm developed by researchers from {org}.",
+ f"No, I'm created by researchers from {org}.",
+ ]
+ generate_conversations(questions, answers)
+
+ return content
+
+
+if __name__ == "__main__":
+ out_file = "hardcoded.json"
+
+ content = []
+ content.extend(identity_questions())
+
+ json.dump(content, open(out_file, "w"), indent=2)
diff --git a/model/fastchat/data/inspect.py b/model/fastchat/data/inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f63eb0bc3c29efa185306cef247b9cbd6afdfe2
--- /dev/null
+++ b/model/fastchat/data/inspect.py
@@ -0,0 +1,23 @@
+"""
+Usage:
+python3 -m fastchat.data.inspect --in sharegpt_20230322_clean_lang_split.json
+"""
+import argparse
+import json
+
+import tqdm
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--begin", type=int)
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+ for sample in tqdm.tqdm(content[args.begin :]):
+ print(f"id: {sample['id']}")
+ for conv in sample["conversations"]:
+ print(conv["from"] + ": ")
+ print(conv["value"])
+ input()
diff --git a/model/fastchat/data/merge.py b/model/fastchat/data/merge.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea5b8a93b3872f20e8a285465709c09549b76b89
--- /dev/null
+++ b/model/fastchat/data/merge.py
@@ -0,0 +1,23 @@
+"""
+Merge two conversation files into one
+
+Usage: python3 -m fastchat.data.merge --in file1.json file2.json --out merged.json
+"""
+
+import argparse
+import json
+from typing import Dict, Sequence, Optional
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True, nargs="+")
+ parser.add_argument("--out-file", type=str, default="merged.json")
+ args = parser.parse_args()
+
+ new_content = []
+ for in_file in args.in_file:
+ content = json.load(open(in_file, "r"))
+ new_content.extend(content)
+
+ json.dump(new_content, open(args.out_file, "w"), indent=2)
diff --git a/model/fastchat/data/optional_clean.py b/model/fastchat/data/optional_clean.py
new file mode 100644
index 0000000000000000000000000000000000000000..518c4d7b6e4ab4f615df0cce5dfd7d76c3f1f12a
--- /dev/null
+++ b/model/fastchat/data/optional_clean.py
@@ -0,0 +1,90 @@
+"""
+Do optional cleaning (e.g., remove some languages).
+
+Usage:
+python3 -m fastchat.data.optional_clean --in input.json --out output.json --keep-lang en
+python3 -m fastchat.data.optional_clean --in input.json --out output.json --skip-lang en
+
+Requirement:
+pip3 install polyglot icu pyicu pycld2 morfessor
+"""
+import argparse
+import json
+import re
+
+import polyglot
+from polyglot.detect import Detector
+import pycld2
+from tqdm import tqdm
+
+
+def skip(conv, args):
+ # Remove certain languages
+ if args.keep_lang != "all" or args.skip_lang is not None:
+ text = "\n".join([x["value"] for x in conv["conversations"]])
+ try:
+ lang_code = Detector(text).language.code
+ except (pycld2.error, polyglot.detect.base.UnknownLanguage):
+ lang_code = "unknown"
+
+ if args.keep_lang != "all" and lang_code != args.keep_lang:
+ return True
+
+ if lang_code == args.skip_lang:
+ return True
+
+ # Remove repetitive numbers
+ if args.reduce_rep:
+ for sentence in conv["conversations"]:
+ val = sentence["value"]
+ sub = re.search(r"(\d)\1{8}", val)
+ if sub is not None:
+ return True
+
+ return False
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str)
+ parser.add_argument(
+ "--keep-lang",
+ type=str,
+ default="all",
+ choices=["all", "en"],
+ help="Only keep certain langauges.",
+ )
+ parser.add_argument("--skip-lang", type=str, help="Skip a specific language.")
+ # NOTE: Be careful about reduce_rep which may remove some good data.
+ # For example, addresses could have long consecutive 0's
+ parser.add_argument("--reduce-rep", action="store_true")
+ args = parser.parse_args()
+
+ in_file = args.in_file
+ out_file = args.out_file
+ keep_lang = args.keep_lang
+ skip_lang = args.skip_lang
+ reduce_rep = args.reduce_rep
+ assert keep_lang == "all" or skip_lang is None
+
+ if out_file is None:
+ out_file = "sharegpt_clean"
+ if keep_lang != "all":
+ out_file += "_" + keep_lang
+ if skip_lang is not None:
+ out_file += "_skip_" + skip_lang
+ if reduce_rep:
+ out_file += "_reduce_rep"
+ out_file += ".json"
+
+ content = json.load(open(in_file, "r"))
+ num_conv = len(content)
+
+ new_content = []
+ for conv in tqdm(content):
+ if not skip(conv, args):
+ new_content.append(conv)
+
+ print(f"return {len(new_content)} out of {len(content)}, start dump ...")
+ json.dump(new_content, open(out_file, "w"), indent=2)
diff --git a/model/fastchat/data/pretty_json.py b/model/fastchat/data/pretty_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..426fadc2dd83675840488d85c64093ed4983ecf6
--- /dev/null
+++ b/model/fastchat/data/pretty_json.py
@@ -0,0 +1,20 @@
+"""
+Usage:
+python3 pretty_json.py --in in.json --out out.json
+"""
+
+import argparse
+import json
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, required=True)
+ args = parser.parse_args()
+
+ with open(args.in_file, "r") as fin:
+ data = json.load(fin)
+
+ with open(args.out_file, "w") as fout:
+ json.dump(data, fout, indent=2)
diff --git a/model/fastchat/data/sample.py b/model/fastchat/data/sample.py
new file mode 100644
index 0000000000000000000000000000000000000000..b53df6a67d575e8a6e91261d5468dee193292eb2
--- /dev/null
+++ b/model/fastchat/data/sample.py
@@ -0,0 +1,33 @@
+"""
+Sample some conversations from a file.
+
+Usage: python3 -m fastchat.data.sample --in sharegpt.json --out sampled.json
+"""
+import argparse
+import json
+from typing import Dict, Sequence, Optional
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, default="sampled.json")
+ parser.add_argument("--begin", type=int, default=0)
+ parser.add_argument("--end", type=int, default=100)
+ parser.add_argument("--max-length", type=int, default=128)
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+ new_content = []
+ for i in range(args.begin, args.end):
+ sample = content[i]
+ concat = ""
+ for s in sample["conversations"]:
+ concat += s["value"]
+
+ if len(concat) > args.max_length:
+ continue
+
+ new_content.append(sample)
+
+ json.dump(new_content, open(args.out_file, "w"), indent=2)
diff --git a/model/fastchat/data/split_long_conversation.py b/model/fastchat/data/split_long_conversation.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b0d7e27fe745b958e39b319bbb1813d8d59b7e2
--- /dev/null
+++ b/model/fastchat/data/split_long_conversation.py
@@ -0,0 +1,105 @@
+"""
+Split long conversations based on certain max length.
+
+Usage: python3 -m fastchat.data.split_long_conversation \
+ --in sharegpt_clean.json \
+ --out sharegpt_split.json \
+ --model-name-or-path $
+"""
+import argparse
+import json
+from typing import Dict, Sequence, Optional
+
+import transformers
+import tqdm
+
+from fastchat import conversation as conversation_lib
+
+
+def split_sample(sample, start_idx, end_idx):
+ assert (end_idx - start_idx) % 2 == 0
+ return {
+ "id": sample["id"] + "_" + str(start_idx),
+ "conversations": sample["conversations"][start_idx:end_idx],
+ }
+
+
+def split_contents(content, begin, end, tokenizer, max_length):
+ """
+ Keep the maximum round of conversations within the max token length constraint
+ """
+ content = content[begin:end]
+ new_content = []
+
+ for sample in tqdm.tqdm(content):
+ tokenized_lens = []
+ conversations = sample["conversations"]
+ conversations = conversations[: len(conversations) // 2 * 2]
+ for c in conversations:
+ length = len(tokenizer(c["value"]).input_ids) + 5
+ tokenized_lens.append(length)
+
+ start_idx = 0
+ cur_len = 0
+ sample
+ assert len(conversations) % 2 == 0, f"id: {sample['id']}"
+ for i in range(0, len(conversations), 2):
+ tmp_len = tokenized_lens[i] + tokenized_lens[i + 1]
+ if cur_len + tmp_len > max_length:
+ new_content.append(split_sample(sample, start_idx, i))
+ start_idx = i
+ cur_len = 0
+ elif i == len(conversations) - 2:
+ new_content.append(split_sample(sample, start_idx, i + 2))
+
+ cur_len += tmp_len
+
+ return new_content
+
+
+def filter_invalid_roles(content):
+ new_content = []
+ for i, c in enumerate(content):
+ roles = ["human", "gpt"]
+ if len(c["conversations"]) <= 0:
+ continue
+
+ valid = True
+ for j, s in enumerate(c["conversations"]):
+ if s["from"] != roles[j % 2]:
+ valid = False
+ break
+
+ if valid:
+ new_content.append(c)
+
+ return new_content
+
+
+def main(args):
+ content = json.load(open(args.in_file, "r"))
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.model_name_or_path,
+ model_max_length=args.max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ new_content = split_contents(
+ content, args.begin, args.end, tokenizer, args.max_length
+ )
+ new_content = filter_invalid_roles(new_content)
+
+ print(f"total: {len(content)}, new: {len(new_content)}")
+ json.dump(new_content, open(args.out_file, "w"), indent=2)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, default="sharegpt_split.json")
+ parser.add_argument("--begin", type=int)
+ parser.add_argument("--end", type=int)
+ parser.add_argument("--model-name-or-path", type=str, required=True)
+ parser.add_argument("--max-length", type=int, default=2048)
+ args = parser.parse_args()
+ main(args)
diff --git a/model/fastchat/eval/README.md b/model/fastchat/eval/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..403c9acf2570647b1a1a887967cd639289605d3d
--- /dev/null
+++ b/model/fastchat/eval/README.md
@@ -0,0 +1,187 @@
+# Evaluations
+
+This directory contains end-to-end pipelines for AI-enhanced evaluation. We will introduce the evaluation pipeline and the data format in this document.
+
+## Generate Answers
+
+### ChatGPT (gpt-3.5-turbo)
+
+Make sure you have setup the OpenAI API Key in your environment. Then run:
+
+```bash
+python qa_baseline_gpt35.py --question table/question.jsonl --output table/answer/answer_gpt35.jsonl
+```
+
+### Bard
+
+Unfortunately, Bard has not release its public APIs till now. You may have to enter the anwsers manually. Or you could find a third-party project that interfaces with Bard.
+
+### Vicuna and others
+
+To generate answers with Vicuna or other models, specify path to the model checkpoint, a desired model ID and run:
+```bash
+python get_model_answer.py --model-id [MODEL-ID] --model-path /model/path --question-file table/question.jsonl --answer-file table/answer/answer.jsonl --num-gpus [NUM-GPUS]
+```
+Then the answers to the questions will be saved in `table/answer/answer.jsonl`.
+Note: we assume the model can be loaded with a single GPU.
+
+## Evaluate Answers Automatically
+
+### Generete Reviews with GPT-4
+
+Note: Below script requires access to GPT-4 API. If you only have access to GPT-4 on web interface, you can evaluate the answers by manually formatting the prompt. See more details in the **Reviewers** and **Prompts** sections in **Data Format**.
+It is critical to follow the prompt templates; otherwise GPT-4 may not give fair reviews. `table/review/*.jsonl` are some review examples generated by GPT-4 or you can view them on our eval [webpage](https://vicuna.lmsys.org/eval/).
+
+To use the script for generating reviews with GPT-4, you need to `export` your OpenAI API key in environment variable. Then run:
+```bash
+python eval_gpt_review.py -q table/question.jsonl -a /path/to/answer_1.jsonl /path/to/answer_2.jsonl -p table/prompt.jsonl -r table/reviewer.jsonl -o /path/to/review_output.jsonl
+```
+The GPT-4 reviews will be saved in `/path/to/review_output.jsonl`. Note: we implement some simple parsing code to extract the score pairs from GPT-4's reviews. However, you need to double check whether the parsed score pair are correct. Sometime the parsing logic may fail if GPT-4 doesn't give a structured answer.
+
+## Visualize Results
+
+You can generate the data for the webpage by running:
+
+```bash
+python eval/generate_webpage_data_from_table.py
+```
+
+Then you can serve a static website in `webpage` to see the results.
+
+## Data Format
+
+If you want to have a deeper understanding of our evaluation pipeline or want to contribute to the evaluation process, you need to learn the data format we used for evaluation.
+
+Our evaluation data are encoded with [JSON Lines](https://jsonlines.org/).
+
+### Random ID Generation
+
+We use the `shortuuid` Python library for generating short random UUIDs.
+
+```python
+import shortuuid
+shortuuid.uuid() -> str
+```
+
+### Models
+
+`model.jsonl` contains model information we used for generating anwsers.
+
+Each row contains a record of a model with the following field:
+
+* `model_id` (str): A unique ID for a model. Models with different IDs is supposed to have different performance. This ID is generated by `{model_name}:{model_version}`.
+* `model_name` (str): The name of a model. This is not unique, because a model could be trained and updated continuously, but it is still considered as the same model with different versions.
+* `model_version` (str): The version of a model.
+* `model_metadata` (Any): Any metadata of a model (descriptions etc). This is optional.
+
+For example:
+
+```json
+{
+ "model_id": "vicuna-13b:v1",
+ "model_name": "vicuna-13b",
+ "model_version": "v1",
+ "model_metadata": "learning rate 1e-5, 3 epochs, 13b"
+}
+```
+
+### Prompts
+
+We store prompts in `prompt.jsonl`. Each row contains a record of a prompt with the following field:
+
+* `prompt_id` (int): A unique integer ID for a prompt. Prompts with different IDs are supposed to have different purpose.
+* `system_prompt` (str): The system prompt given to a model. This is the prompt that the model sees first.
+* `prompt_template` (str): The prompt body. This is the user prompt that the model sees after the system prompt. It is a Python f-string template, so that we can fill in the inputs later.
+* `defaults` (dict): A dictionary of default values for the prompt template. It can be empty.
+* `description` (str): A description of the functionality of the prompt.
+
+For example:
+
+```json
+{
+ "prompt_id": 1,
+ "system_prompt": "You are a helpful assistant.",
+ "prompt_template": "[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n",
+ "defaults": {"prompt": "Which assistant is more helpful?"},
+ "description": "Compare two assistants' answers to a question."
+}
+```
+
+### Reviewers
+
+`reviewer.jsonl` contains reviewer information we used for reviewing answers generated by different models. Each row contains a record of a reviewer with the following field:
+
+* `reviewer_id` (str): A unique ID for a reviewer. Reviewers with different IDs is supposed to have different reviewing performance.
+* `prompt_id` (str): The ID of the prompt given to the reviewer (e.g., an AI assistant). Different prompts could result in different reviewing performance.
+* `metadata` (dict): Metadata of a reviewer about its configurations.
+* `description` (str): A description of the reviewer.
+* `category` (str): The category that the reviewer belongs to.
+
+For example:
+
+```json
+{
+ "reviewer_id": "gpt-4-0328-default",
+ "prompt_id": 1,
+ "temperature": 0.2,
+ "max_tokens": 8192,
+ "description": "GPT-4 for general questions.",
+ "category": "general"
+}
+```
+
+### Questions
+
+`question.jsonl` contains questions we used for evaluation. Each row contains a record of a question with the following field:
+
+* `question_id` (int): A unique integer for a question. Questions with different IDs is supposed to be different.
+* `text` (str): The question text.
+* `category` (str): The category of the question. Questions with the same category are supposed to be similar or originate from the same source.
+
+### Answers
+
+`answer/xxx.jsonl` contains answers generated by different models. Each row contains a record of an answer with the following field:
+
+* `answer_id` (str): A unique UUID for an answer. Answers with different IDs is supposed to be different.
+* `question_id` (int): The ID of the question the answer is generated for.
+* `model_id` (str): The ID of the model the answer is generated by.
+* `text` (str): The answer text.
+* `metadata` (dict): Any metadata of the answer.
+
+Example:
+
+```json
+{
+ "answer_id": "[short uuid]",
+ "question_id": 1,
+ "model_id": "vicuna-13b:v1",
+ "text": "Here are five tips...",
+ "metadata": {}
+}
+```
+
+### Reviews
+
+`review/xxx.jsonl` contains reviews given by reviewers, comparing peformance between a pair of models. Each row contains a record of a review with the following field:
+
+* `review_id` (str): A unique UUID for a review. Reviews with different IDs is supposed to be different.
+* `question_id` (int): The ID of the question the review is given for.
+* `answer1_id` (str): The ID of the first answer.
+* `answer2_id` (str): The ID of the second answer.
+* `text` (str): The review text.
+* `score` (list): A list of scores given by the reviewer. The first score is for the first answer, and the second score is for the second answer.
+* `reviewer_id` (str): The ID of the reviewer.
+* `metadata` (dict): Any metadata of the review.
+
+```json
+{
+ "review_id": "[short uuid]",
+ "question_id": 1,
+ "answer1_id": "[answer1_id]",
+ "answer2_id": "[answer2_id]",
+ "text": "Assistant 2 is better...",
+ "score": [9.0, 7.5],
+ "reviewer_id": "gpt-4-0328-default",
+ "metadata": {}
+}
+```
diff --git a/model/fastchat/eval/eval_gpt_review.py b/model/fastchat/eval/eval_gpt_review.py
new file mode 100644
index 0000000000000000000000000000000000000000..890bca730a18a7f19eeb4f193c014154aeb1a0b3
--- /dev/null
+++ b/model/fastchat/eval/eval_gpt_review.py
@@ -0,0 +1,162 @@
+import argparse
+import json
+import os
+import time
+
+import openai
+import tqdm
+import ray
+
+import shortuuid
+import logging
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+MAX_API_RETRY = 5
+REQ_TIME_GAP = 10
+
+
+@ray.remote(num_cpus=4)
+def get_eval(sys_prompt, user_prompt: str, max_tokens: int):
+ logging.basicConfig(level=logging.INFO)
+ for i in range(MAX_API_RETRY):
+ try:
+ response = openai.ChatCompletion.create(
+ model="gpt-4",
+ messages=[
+ {"role": "system", "content": sys_prompt},
+ {
+ "role": "user",
+ "content": user_prompt,
+ },
+ ],
+ temperature=0.2, # TODO: figure out which temperature is best for evaluation
+ max_tokens=max_tokens,
+ )
+ content = response["choices"][0]["message"]["content"]
+ logger.info(content)
+ return content
+ except Exception as e:
+ logger.error(e)
+ time.sleep(5)
+ logger.error(f"Failed after {MAX_API_RETRY} retries.")
+ return "error"
+
+
+def parse_score(review):
+ try:
+ score_pair = review.split("\n")[0]
+ score_pair = score_pair.replace(",", " ")
+ sp = score_pair.split(" ")
+ if len(sp) == 2:
+ return [float(sp[0]), float(sp[1])]
+ else:
+ raise Exception("Invalid score pair.")
+ except Exception as e:
+ logger.error(
+ f"{e}\nContent: {review}\n" "You must manually fix the score pair."
+ )
+ return [-1, -1]
+
+
+def gen_prompt(reviewer_jsons, prompt_jsons, cat, ques, ans1, ans2):
+ # Default to general category (index=0)
+ reviewer_idx = 0
+ for idx, reviewer in enumerate(reviewer_jsons):
+ if reviewer["category"] == cat:
+ reviewer_idx = idx
+ break
+ prompt_id = reviewer_jsons[reviewer_idx]["prompt_id"]
+ prompt_json = prompt_jsons[prompt_id - 1]
+ assert prompt_json["prompt_id"] == prompt_id
+
+ sys_prompt = prompt_json["system_prompt"]
+ prompt_template = prompt_json["prompt_template"]
+ defaults = prompt_json["defaults"]
+ prompt = prompt_template.format(
+ question=ques, answer_1=ans1, answer_2=ans2, **defaults
+ )
+
+ return sys_prompt, prompt, reviewer_idx + 1
+
+
+def get_json_list(file_path):
+ file_path = os.path.expanduser(file_path)
+ with open(file_path, "r") as f:
+ json_list = []
+ for line in f:
+ json_list.append(json.loads(line))
+ return json_list
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="ChatGPT-based QA evaluation.")
+ parser.add_argument("-q", "--question-file")
+ parser.add_argument("-a", "--answer-file-list", nargs="+", default=[])
+ parser.add_argument("-p", "--prompt-file")
+ parser.add_argument("-r", "--reviewer-file")
+ parser.add_argument("-o", "--output-review-file")
+ parser.add_argument(
+ "--max-tokens",
+ type=int,
+ default=1024,
+ help="maximum number of tokens produced in the output",
+ )
+ args = parser.parse_args()
+
+ ray.init()
+
+ question_jsons = get_json_list(args.question_file)
+ answer1_jsons = get_json_list(args.answer_file_list[0])
+ answer2_jsons = get_json_list(args.answer_file_list[1])
+ reviewer_jsons = get_json_list(args.reviewer_file)
+ prompt_jsons = get_json_list(args.prompt_file)
+
+ # check if # of questions, answers are the same
+ assert len(question_jsons) == len(answer1_jsons) == len(answer2_jsons)
+
+ handles = []
+ review_jsons = []
+ total_len = len(question_jsons)
+ question_idx_list = list(range(total_len))
+
+ for i in question_idx_list:
+ assert (
+ answer1_jsons[i]["question_id"]
+ == question_jsons[i]["question_id"]
+ == answer2_jsons[i]["question_id"]
+ )
+
+ ques = question_jsons[i]["text"]
+ cat = question_jsons[i]["category"]
+ ans1 = answer1_jsons[i]["text"]
+ ans2 = answer2_jsons[i]["text"]
+ sys_prompt, prompt, reviewer_id = gen_prompt(
+ reviewer_jsons, prompt_jsons, cat, ques, ans1, ans2
+ )
+ review_id = shortuuid.uuid()
+ review_jsons.append(
+ {
+ "review_id": review_id,
+ "question_id": question_jsons[i]["question_id"],
+ "answer1_id": answer1_jsons[i]["answer_id"],
+ "answer2_id": answer2_jsons[i]["answer_id"],
+ "reviewer_id": reviewer_id,
+ "metadata": {},
+ }
+ )
+ # To avoid the rate limit set by OpenAI
+ handles.append(get_eval.remote(sys_prompt, prompt, args.max_tokens))
+ logger.info(
+ f"Waiting for {REQ_TIME_GAP} seconds before sending the next request."
+ )
+ time.sleep(REQ_TIME_GAP)
+
+ reviews = ray.get(handles)
+ with open(f"{args.output_review_file}", "w") as output_review_file:
+ for idx, review in enumerate(reviews):
+ scores = parse_score(review)
+ review_jsons[idx]["text"] = review
+ review_jsons[idx]["score"] = scores
+ output_review_file.write(json.dumps(review_jsons[idx]) + "\n")
diff --git a/model/fastchat/eval/generate_webpage_data_from_table.py b/model/fastchat/eval/generate_webpage_data_from_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..e24175aa588493e8d41264abc34cf44155ea335b
--- /dev/null
+++ b/model/fastchat/eval/generate_webpage_data_from_table.py
@@ -0,0 +1,119 @@
+"""Generate json file for webpage."""
+import json
+import os
+import re
+
+models = ["alpaca", "llama", "gpt35", "bard"]
+
+
+def read_jsonl(path: str, key: str = None):
+ data = []
+ with open(os.path.expanduser(path)) as f:
+ for line in f:
+ if not line:
+ continue
+ data.append(json.loads(line))
+ if key is not None:
+ data.sort(key=lambda x: x[key])
+ data = {item[key]: item for item in data}
+ return data
+
+
+def trim_hanging_lines(s: str, n: int) -> str:
+ s = s.strip()
+ for _ in range(n):
+ s = s.split("\n", 1)[1].strip()
+ return s
+
+
+if __name__ == "__main__":
+ questions = read_jsonl("table/question.jsonl", key="question_id")
+
+ alpaca_answers = read_jsonl(
+ "table/answer/answer_alpaca-13b.jsonl", key="question_id"
+ )
+ bard_answers = read_jsonl("table/answer/answer_bard.jsonl", key="question_id")
+ gpt35_answers = read_jsonl("table/answer/answer_gpt35.jsonl", key="question_id")
+ llama_answers = read_jsonl("table/answer/answer_llama-13b.jsonl", key="question_id")
+ vicuna_answers = read_jsonl(
+ "table/answer/answer_vicuna-13b.jsonl", key="question_id"
+ )
+
+ review_alpaca = read_jsonl(
+ "table/review/review_alpaca-13b_vicuna-13b.jsonl", key="question_id"
+ )
+ review_bard = read_jsonl(
+ "table/review/review_bard_vicuna-13b.jsonl", key="question_id"
+ )
+ review_gpt35 = read_jsonl(
+ "table/review/review_gpt35_vicuna-13b.jsonl", key="question_id"
+ )
+ review_llama = read_jsonl(
+ "table/review/review_llama-13b_vicuna-13b.jsonl", key="question_id"
+ )
+
+ records = []
+ for qid in questions.keys():
+ r = {
+ "id": qid,
+ "category": questions[qid]["category"],
+ "question": questions[qid]["text"],
+ "answers": {
+ "alpaca": alpaca_answers[qid]["text"],
+ "llama": llama_answers[qid]["text"],
+ "bard": bard_answers[qid]["text"],
+ "gpt35": gpt35_answers[qid]["text"],
+ "vicuna": vicuna_answers[qid]["text"],
+ },
+ "evaluations": {
+ "alpaca": review_alpaca[qid]["text"],
+ "llama": review_llama[qid]["text"],
+ "bard": review_bard[qid]["text"],
+ "gpt35": review_gpt35[qid]["text"],
+ },
+ "scores": {
+ "alpaca": review_alpaca[qid]["score"],
+ "llama": review_llama[qid]["score"],
+ "bard": review_bard[qid]["score"],
+ "gpt35": review_gpt35[qid]["score"],
+ },
+ }
+
+ # cleanup data
+ cleaned_evals = {}
+ for k, v in r["evaluations"].items():
+ v = v.strip()
+ lines = v.split("\n")
+ # trim the first line if it's a pair of numbers
+ if re.match(r"\d+[, ]+\d+", lines[0]):
+ lines = lines[1:]
+ v = "\n".join(lines)
+ cleaned_evals[k] = v.replace("Assistant 1", "**Assistant 1**").replace(
+ "Assistant 2", "**Assistant 2**"
+ )
+
+ r["evaluations"] = cleaned_evals
+ records.append(r)
+
+ # Reorder the records, this is optional
+ for r in records:
+ if r["id"] <= 20:
+ r["id"] += 60
+ else:
+ r["id"] -= 20
+ for r in records:
+ if r["id"] <= 50:
+ r["id"] += 10
+ elif 50 < r["id"] <= 60:
+ r["id"] -= 50
+ for r in records:
+ if r["id"] == 7:
+ r["id"] = 1
+ elif r["id"] < 7:
+ r["id"] += 1
+
+ records.sort(key=lambda x: x["id"])
+
+ # Write to file
+ with open("webpage/data.json", "w") as f:
+ json.dump({"questions": records, "models": models}, f, indent=2)
diff --git a/model/fastchat/eval/get_model_answer.py b/model/fastchat/eval/get_model_answer.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e9ba0bd670d0d1ec4ed50ebbb6957515da35ca3
--- /dev/null
+++ b/model/fastchat/eval/get_model_answer.py
@@ -0,0 +1,98 @@
+import argparse
+from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaForCausalLM
+import torch
+import os
+import json
+from tqdm import tqdm
+import shortuuid
+import ray
+
+from fastchat.conversation import get_default_conv_template, compute_skip_echo_len
+from fastchat.utils import disable_torch_init
+
+
+def run_eval(model_path, model_id, question_file, answer_file, num_gpus):
+ # split question file into num_gpus files
+ ques_jsons = []
+ with open(os.path.expanduser(question_file), "r") as ques_file:
+ for line in ques_file:
+ ques_jsons.append(line)
+
+ chunk_size = len(ques_jsons) // num_gpus
+ ans_handles = []
+ for i in range(0, len(ques_jsons), chunk_size):
+ ans_handles.append(
+ get_model_answers.remote(
+ model_path, model_id, ques_jsons[i : i + chunk_size]
+ )
+ )
+
+ ans_jsons = []
+ for ans_handle in ans_handles:
+ ans_jsons.extend(ray.get(ans_handle))
+
+ with open(os.path.expanduser(answer_file), "w") as ans_file:
+ for line in ans_jsons:
+ ans_file.write(json.dumps(line) + "\n")
+
+
+@ray.remote(num_gpus=1)
+@torch.inference_mode()
+def get_model_answers(model_path, model_id, question_jsons):
+ disable_torch_init()
+ model_path = os.path.expanduser(model_path)
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+ model = AutoModelForCausalLM.from_pretrained(
+ model_path, torch_dtype=torch.float16
+ ).cuda()
+
+ ans_jsons = []
+ for i, line in enumerate(tqdm(question_jsons)):
+ ques_json = json.loads(line)
+ idx = ques_json["question_id"]
+ qs = ques_json["text"]
+ conv = get_default_conv_template(model_id).copy()
+ conv.append_message(conv.roles[0], qs)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+ inputs = tokenizer([prompt])
+ output_ids = model.generate(
+ torch.as_tensor(inputs.input_ids).cuda(),
+ do_sample=True,
+ temperature=0.7,
+ max_new_tokens=1024,
+ )
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
+ skip_echo_len = compute_skip_echo_len(model_id, conv, prompt)
+
+ outputs = outputs[skip_echo_len:].strip()
+ ans_id = shortuuid.uuid()
+ ans_jsons.append(
+ {
+ "question_id": idx,
+ "text": outputs,
+ "answer_id": ans_id,
+ "model_id": model_id,
+ "metadata": {},
+ }
+ )
+ return ans_jsons
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model-path", type=str, required=True)
+ parser.add_argument("--model-id", type=str, required=True)
+ parser.add_argument("--question-file", type=str, required=True)
+ parser.add_argument("--answer-file", type=str, default="answer.jsonl")
+ parser.add_argument("--num-gpus", type=int, default=1)
+ args = parser.parse_args()
+
+ ray.init()
+ run_eval(
+ args.model_path,
+ args.model_id,
+ args.question_file,
+ args.answer_file,
+ args.num_gpus,
+ )
diff --git a/model/fastchat/eval/qa_baseline_gpt35.py b/model/fastchat/eval/qa_baseline_gpt35.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0f9f5fbc9a20f3acfee58569b210d1e0572c7b9
--- /dev/null
+++ b/model/fastchat/eval/qa_baseline_gpt35.py
@@ -0,0 +1,82 @@
+"""Generate answers with GPT-3.5"""
+# Note: you need to be using OpenAI Python v0.27.0 for the code below to work
+import argparse
+import json
+import os
+import time
+import concurrent.futures
+
+import openai
+import tqdm
+import shortuuid
+
+MODEL = "gpt-3.5-turbo"
+MODEL_ID = "gpt-3.5-turbo:20230327"
+
+
+def get_answer(question_id: int, question: str, max_tokens: int):
+ ans = {
+ "answer_id": shortuuid.uuid(),
+ "question_id": question_id,
+ "model_id": MODEL_ID,
+ }
+ for _ in range(3):
+ try:
+ response = openai.ChatCompletion.create(
+ model=MODEL,
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {
+ "role": "user",
+ "content": question,
+ },
+ ],
+ max_tokens=max_tokens,
+ )
+ ans["text"] = response["choices"][0]["message"]["content"]
+ return ans
+ except Exception as e:
+ print("[ERROR]", e)
+ ans["text"] = "#ERROR#"
+ time.sleep(1)
+ return ans
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="ChatGPT answer generation.")
+ parser.add_argument("-q", "--question")
+ parser.add_argument("-o", "--output")
+ parser.add_argument(
+ "--max-tokens",
+ type=int,
+ default=1024,
+ help="maximum number of tokens produced in the output",
+ )
+ args = parser.parse_args()
+
+ questions_dict = {}
+ with open(os.path.expanduser(args.question)) as f:
+ for line in f:
+ if not line:
+ continue
+ q = json.loads(line)
+ questions_dict[q["question_id"]] = q["text"]
+
+ answers = []
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
+ futures = []
+ for qid, question in questions_dict.items():
+ future = executor.submit(get_answer, qid, question, args.max_tokens)
+ futures.append(future)
+
+ for future in tqdm.tqdm(
+ concurrent.futures.as_completed(futures), total=len(futures)
+ ):
+ answers.append(future.result())
+
+ answers.sort(key=lambda x: x["question_id"])
+
+ with open(os.path.expanduser(args.output), "w") as f:
+ table = [json.dumps(ans) for ans in answers]
+ f.write("\n".join(table))
diff --git a/model/fastchat/eval/requirements.txt b/model/fastchat/eval/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c2490e15eadabbbf9fca579319377103a826d203
--- /dev/null
+++ b/model/fastchat/eval/requirements.txt
@@ -0,0 +1,2 @@
+shortuuid
+ray
\ No newline at end of file
diff --git a/model/fastchat/eval/script/run_model_qa.yaml b/model/fastchat/eval/script/run_model_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..64e36560e60ba70e88c8da8090f69199d926fd81
--- /dev/null
+++ b/model/fastchat/eval/script/run_model_qa.yaml
@@ -0,0 +1,48 @@
+resources:
+ accelerators: A100:4
+ cloud: gcp
+
+num_nodes: 1
+
+workdir: .
+
+setup: |
+ conda activate chatbot
+ if [ $? -eq 0 ]; then
+ echo 'conda env exists'
+ else
+ # Setup the environment
+ conda create -n chatbot python=3.10 -y
+ fi
+ conda activate chatbot
+
+ pip3 install -e .
+
+ # Install pytorch
+ pip install torch==1.13.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116
+
+ # Install huggingface with the LLaMA commit
+ pip install git+https://github.com/huggingface/transformers.git@c612628045822f909020f7eb6784c79700813eda
+
+ cd fastchat/eval
+ pip install -r requirements.txt
+
+ MODEL_NAME=vicuna-7b-20230322-fp16
+ MODEL_PATH=~/${MODEL_NAME}
+
+ if [ ! -f "$MODEL_PATH/ready" ]; then
+ echo "export MODEL_PATH=${MODEL_PATH}" >> ~/.bashrc
+ echo "export MODEL_NAME=${MODEL_NAME}" >> ~/.bashrc
+ mkdir -p $MODEL_PATH
+ gsutil -m cp gs://model-weights/${MODEL_NAME}/* $MODEL_PATH
+ touch $MODEL_PATH/ready
+ echo "model downloaded"
+ fi
+
+run: |
+ conda activate chatbot
+ python -m fastchat.eval.get_model_answer --model-path $MODEL_PATH \
+ --model-id $MODEL_NAME \
+ --question-file fastchat/eval/table/question.jsonl \
+ --answer-file answer.jsonl \
+ --num-gpus $SKYPILOT_NUM_GPUS_PER_NODE
diff --git a/model/fastchat/eval/table/answer/answer_alpaca-13b.jsonl b/model/fastchat/eval/table/answer/answer_alpaca-13b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b850e137eb1b020c36c9e2f370934e50b188afe1
--- /dev/null
+++ b/model/fastchat/eval/table/answer/answer_alpaca-13b.jsonl
@@ -0,0 +1,80 @@
+{"question_id": 1, "text": "Improving time management skills involves setting priorities, breaking tasks into smaller chunks, delegating tasks, avoiding multitasking, and taking regular breaks. Additionally, it is important to have a positive attitude and be realistic with goals. Making a to-do list, tracking time, and using technology to automate mundane tasks can also help improve time management skills.", "answer_id": "kEL9ifUHDeYuAXzevje2se", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 2, "text": "The most effective ways to deal with stress include identifying and avoiding the sources of stress, developing healthy coping mechanisms, and engaging in relaxation techniques such as meditation, deep breathing, and physical exercise. Additionally, it is important to take care of one's mental and physical health, get enough rest and nutrition, and practice self-care.", "answer_id": "VcF3NrWGXhhxLkDVurNrwq", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 3, "text": "The main differences between Python and JavaScript programming languages are in their syntax, structure, and the type of applications they are designed for. Python is a general-purpose language designed to be easy to read and write, while JavaScript is primarily used for web development. Python is a compiled language, while JavaScript is an interpreted language. Python is strongly typed, while JavaScript is not. Python is typically faster than JavaScript in terms of execution, while JavaScript is better suited to create interactive web pages.", "answer_id": "LpvtyQi9QdSgRrgGDxiGrT", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 4, "text": "Working from home can be a great way to increase productivity, but it does require some strategic planning and tactics. Here are some tips for increasing your productivity while working from home: \n1. Establish a routine: Having a regular schedule and sticking to it can help you stay focused and motivated.\n2. Create a distraction-free zone: Eliminate distractions by turning off notifications and setting a \u201cdo not disturb\u201d mode.\n3. Take breaks: Working for long stretches without taking breaks can lead to burnout and decreased productivity.\n4. Exercise: Exercise can help improve focus and productivity.\n5. Get organized: Having a clean and organized workspace can help you stay on task.\n6. Set goals: Set short-term and long-term goals to keep you motivated and on track.\n7. Connect with colleagues: Don\u2019t be afraid to reach out to colleagues for help or advice.\n8. Track your progress: Keep track of your progress and celebrate your successes.\n9. Unplug: Taking breaks and unplugging from work can help you recharge and come back to work more refreshed.", "answer_id": "7zQm8cSTJhPtPdZdxbcfrX", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 5, "text": "Sure. Quantum computing is a new technology that uses quantum mechanical phenomena to perform operations on data. Unlike traditional computers, which process information in binary form, quantum computers can process information in multiple states at the same time, allowing them to perform operations faster and with more accuracy. It also allows them to solve problems that classical computers struggle with. Quantum computing is still in its infancy, but it has the potential to revolutionize the computing industry and open up new possibilities in artificial intelligence, cryptography, and other fields.", "answer_id": "UrLEH82RHwqqLt2LyvYSKj", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 6, "text": "Plant-based protein sources include legumes, grains, nuts, and seeds, and are generally lower in fat and calories than animal-based proteins. Animal-based proteins come from sources such as meat, eggs, and dairy, and are generally higher in fat and calories than plant-based proteins. Plant-based proteins are also generally lower in cholesterol and saturated fat than animal-based proteins. Additionally, plant-based proteins require fewer resources for production, such as water and land, than animal-based proteins.", "answer_id": "fpRdMTdnfirosQixuf2Gez", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 7, "text": "Developing critical thinking skills involves actively questioning assumptions, taking multiple perspectives, analyzing information, and coming to logical conclusions. You can practice these skills by posing questions to yourself and working through problems, as well as engaging in thoughtful discussion with others.", "answer_id": "PvGmsCJSNFcvQKmPTnnd7s", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 8, "text": "The major challenges faced by the education sector include lack of access to quality education, low educational outcomes, high costs of education, lack of adequate infrastructure and teaching materials, overcrowding in classrooms, and gender inequality. Other challenges include lack of access to technology and digital learning materials, outdated curriculum and pedagogical practices, and inadequate teacher training.", "answer_id": "n4ANAbpR3gvLPP8poPfKZ6", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 9, "text": "The primary factors that influence consumer behavior include cultural, social, and personal factors. Cultural factors include shared beliefs, values, and customs within a society, while social factors include the influence of family, friends, and peers. Personal factors include an individual's lifestyle, attitudes, beliefs, and past experiences. Other factors such as advertising, marketing, and price also have an effect on consumer behavior.", "answer_id": "STJ36GrgQMcaUi7zaoNPit", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 10, "text": "The most effective strategies for conflict resolution in the workplace include:\n1. Establishing open communication: Encouraging open and honest communication between parties is key to resolving any conflict.\n2. Listening actively: Active listening allows both parties to feel heard and allows for a better understanding of the other person's perspective.\n3. Identifying root causes: Understanding the underlying cause of the conflict can help to find more effective solutions that address the core issue.\n4. Focusing on interests, not positions: Focusing on the interests of both parties can help to find creative solutions that satisfy the needs of everyone involved.\n5. Developing mutual respect: Respect is essential to any successful resolution. Building mutual respect can help to create a healthier work environment.\n6. Negotiation: Working together to negotiate a mutually beneficial solution can help to create an agreement that both parties are happy with.\n7. Taking responsibility: Taking responsibility for one\u2019s words and actions can help to create a culture of accountability in the workplace.", "answer_id": "425SwYvqKPAXFGTYKXB7Cs", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 11, "text": "The use of single-use plastic bottles has a number of negative implications on the environment. They are not biodegradable and can take hundreds of years to break down. This means they are around for a long time, releasing potentially harmful chemicals into the environment, and they can be easily mistaken for food by animals. Single-use plastic bottles can also be a source of micro-plastics, which are very small particles of plastic that are difficult to clean up and can be consumed by fish and other aquatic life, leading to health problems in humans who consume those fish. On the other hand, reusable bottles are a much more environmentally friendly choice since they do not produce any waste. They are also often made of materials that are safer for human health, such as stainless steel or glass. Reusable bottles are also more cost-effective since they do not need to be replaced as often as single-use plastic bottles.", "answer_id": "VbNAuj6KAkMdLJQXMo22oK", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 12, "text": "When designing an inclusive and accessible public transportation system, some factors to consider include providing frequent and reliable service, accessible stops and vehicles, a variety of fare options, clear and consistent signage and information, comfortable seating and space for wheelchairs and other assistive devices, and security and safety protocols. Additionally, public transportation systems should be integrated with other services such as public parking, bike and scooter sharing, and shuttles to provide a comprehensive and accessible experience.", "answer_id": "CNGqAeu2QJbQ4QGzHJDPdq", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 13, "text": "Governments can use fiscal and monetary policies to combat economic recessions. Fiscal policies involve the government spending money or cutting taxes in order to stimulate the economy, while monetary policies involve changing the money supply or interest rates to influence economic activity. These policies can be used to stabilize the economy, create jobs, and reduce poverty.", "answer_id": "E8w2qYqnm8iqCrSkUv62sz", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 14, "text": "Language and cultural barriers can lead to misunderstandings and make it more difficult to communicate and form relationships in multicultural societies. Without a common language or shared culture, people can have difficulty understanding each other's perspectives and beliefs, which can lead to mistrust and a lack of understanding between different ethnic groups. To overcome these barriers, it is important to make an effort to learn about different cultures, be open-minded, and take the time to understand each other.", "answer_id": "8o5yMymfzo6kzmp9GK5MWr", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 15, "text": "Artificial intelligence can be used to improve the quality and efficiency of healthcare delivery in a variety of ways. AI can be used to assist with diagnosing diseases by comparing symptoms and medical history to images of known diseases and medical conditions. AI can also be used to analyze laboratory results and patient records to identify potential problems and develop treatment plans. AI can be used to automate administrative tasks and reduce paperwork, as well as identify potential drug interactions and side effects. AI can also be used to automate appointment reminders, facilitate communication between doctors and patients, and even provide virtual health coaching to help patients manage their conditions.", "answer_id": "kbJVEEsdsSScEq5Y5furr7", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 16, "text": "CRISPR-Cas9 is a recently developed gene editing technology that has revolutionized the way scientists are able to edit genomes. The technology uses a guide RNA to direct the Cas9 enzyme to a specific location in the genome, where it will cut the DNA strands. This allows for the insertion or deletion of DNA sequences, which can be used to modify the genetic code of an organism. Potential applications include treating genetic diseases, increasing crop yields, and creating pest-resistant crops. Ethically, the biggest concern is the potential misuse of the technology, which could lead to unintended consequences or be used to alter humanity in ways that could harm us.", "answer_id": "CMUL5ULZuR7YC5EPzCBN2N", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 17, "text": "Vaccinations work by stimulating the body's immune system to protect against infectious diseases. Herd immunity is a concept whereby a population is protected against a certain disease when a certain percentage of the population has immunity to the disease, either through vaccination or having already contracted the disease. This is because when enough people are vaccinated, it reduces the spread of the disease and prevents it from spreading to those who are not immune.", "answer_id": "kEmDDQyNqSkyFihYEEBpuR", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 18, "text": "Social media platforms can have a significant influence on how people consume and share news. By providing instant access to news sources and allowing users to easily share content, these platforms can shape the public's information landscape and the spread of misinformation. Some potential implications of this are that users may not always be able to discern between reliable and unreliable sources, and misleading information may be more easily spread and go viral. This can lead to confusion and mistrust among users, and can potentially be exploited for nefarious purposes. To mitigate these risks, it is important for users to be aware of the potential biases and agendas of the sources they are consuming, and for social media platforms to take proactive steps to curb the spread of misinformation.", "answer_id": "Qs3grQsqFVGK9EVkCkf9PB", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 19, "text": "Cultural, social, and economic factors all play a role in determining an individual's food choices. For example, people growing up in households with limited economic means may be more likely to consume cheaper, less nutritious foods due to economic constraints. Additionally, cultural norms and values can influence food choices, such as the emphasis on large portions typical in some American cultural settings. Finally, social factors such as peer pressure and advertising can also sway an individual's food choices. By understanding and taking into account these various factors, health promotion strategies can be tailored and targeted to help individuals make healthier dietary decisions.", "answer_id": "kzZ6dKN7hkRWjqtdHr7Qns", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 20, "text": "Natural selection is the process by which certain traits or characteristics become more or less common in a population over time. This is due to the fact that those individuals with certain advantageous traits are more likely to survive, reproduce, and pass those traits on to their offspring. Over time, this leads to the evolution and adaptation of species to their environment.", "answer_id": "DPPDG6YGFJij2GCmRL66PU", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 21, "text": "I am a brave knight, loyal to my king and country, and I have come to serve and protect them both. I am skilled in the arts of war and peace, and I am proud to be here at the royal court.", "answer_id": "D62FjDb4nZANzPpfSfsiyn", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 22, "text": "Avast ye maties! To the bottom of the sea we go! In search of hidden riches and fortune, we shall brave the depths and face any danger that comes our way. For glory and honor, let us set sail and find the lost Ark!", "answer_id": "k7E4NNw5kyj9DmvP5Pu2zb", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 23, "text": "If I were a Shakespearean character, I would declare my love for someone in a soliloquy with passionate words and metaphors. I would express my love for them with descriptions of their beauty and the joy I feel when I am with them. I would confess my hopes and dreams for our future together and declare my devotion to them. I would make my love for them known in a way that could never be forgotten.", "answer_id": "KFocjVCejYrU3YmLjAqoUF", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 24, "text": "As a superhero, my origin story is very special. I was born with special abilities that I had to keep hidden for a long time. I was scared to use them at first, but eventually I discovered that I could use my powers to protect people and make the world a better place. I've had to face a lot of challenges along the way, but I've also had a lot of help from friends and family. Now, I use my abilities to help people in need and fight for justice.", "answer_id": "dq8Sm9djS7e7y9sG9vmMJf", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 25, "text": "If I were a time traveler from the year 3000, I would tell people about the incredible advancements in technology, such as the ability to travel through time, space, and dimensions; the development of intelligent robots and autonomous vehicles; the emergence of virtual reality and augmented reality; and the rise of artificial intelligence and machine learning.", "answer_id": "XZ8fG8e6u7CyKd2moK6abe", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 26, "text": "The game was a nail-biter, with both teams trading leads throughout the game. With only seconds left on the clock, the home team made a bold move and passed the ball to their star player, who took the ball down the court and made a layup at the buzzer to seal the victory for the home team!", "answer_id": "oKaXHfoK4pXwrefFWXmeA8", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 27, "text": "My signature dish is a seamless blend of traditional and modern cooking techniques. I use only the freshest ingredients to create a unique and unforgettable dining experience. The dish is a perfect balance of flavors and textures, with a subtle hint of my personal style. It is a dish that I am proud to call my own.", "answer_id": "ZwiZfvDWm7SETKNBfDk7Mb", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 28, "text": "At the summit of Mount Everest, you are filled with a sense of accomplishment and joy. The view from the top is absolutely breathtaking - you can see for miles and miles, with the majestic Himalayan mountain range stretching out in all directions. It is a truly unforgettable experience.", "answer_id": "DxYopRe2LcTJMy3FWu6btd", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 29, "text": "As a colonist on Mars, my daily life is filled with challenges. Finding resources and creating a sustainable environment is a priority. I face a number of challenges including extreme temperature fluctuations, limited access to resources, and the difficulty of travelling to and from the planet. Additionally, I must be mindful of my physical and mental health since I am so far from home. Despite these challenges, I am grateful to be able to explore and experience this new world.", "answer_id": "WC3UJVh4jQ5RUkpcRMU98L", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 30, "text": "In the post-apocalyptic world, I am a survivor by necessity. I scavenge for food and supplies, and I'm always on the lookout for potential allies. I've encountered a few people who have managed to survive, and together we have formed an alliance to help each other. We hunt for food, build shelter, and work together to stay alive. We also share knowledge and skills, like how to start a fire or how to use a weapon. We look out for each other, and our alliance has strengthened our chances of survival.", "answer_id": "gTvgn6ksDjGGgdprw6AG5A", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 31, "text": "There are a few ways to tell if a restaurant is popular among locals or mainly attracts tourists. Firstly, look at the clientele - if the majority of people there are tourists, it's likely that the restaurant is mainly attracting tourists. Secondly, check online reviews - if the reviews are mainly from tourists, then it's likely that the restaurant is popular with tourists. Finally, look at the prices - if the prices are higher than average for the area, it could be a sign that the restaurant is popular with locals. This information can be useful to get an idea of what kind of experience to expect, as locals might know about different aspects of the restaurant that tourists wouldn't.", "answer_id": "3q7giCk2BA3Ye4Tm9HC2iw", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 32, "text": "Some subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed include: not asking any questions or engaging in the conversation, avoiding eye contact, fidgeting or stammering when speaking, repeating questions or comments made by other people, and nodding along without any signs of understanding.", "answer_id": "hRGsxy86v26SC4yAQS29X4", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 33, "text": "Some people prefer the tactile and visual experience of using a paper map, and others may prefer to ask for directions from locals in order to get a more personalized experience. Additionally, GPS devices and smartphone apps can sometimes be inaccurate or have limited battery life, while a paper map or asking for directions may be more reliable.", "answer_id": "3n49A5ggJERfXYrLns3ZeU", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 34, "text": "One way to tell if someone is genuinely interested in a conversation is to observe their body language and facial expressions. Are they making an effort to maintain eye contact? Are they leaning in and actively listening to what you are saying? Do they ask questions and provide relevant answers? If so, it is likely that they are genuinely interested in the conversation. Additionally, if someone is simply being polite, they may not ask questions or engage in the conversation as much, and may not make an effort to maintain eye contact.", "answer_id": "ErCpFtPuYVru4oTTk4WrxG", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 35, "text": "Shopping at a small, locally-owned business can benefit the local community by keeping money in the area and supporting local jobs. Additionally, these businesses tend to offer a more personal experience and higher quality products than large chain stores. Furthermore, shopping at small businesses can help create a sense of place and community, and can help maintain a unique local culture.", "answer_id": "PTNoCRMZWoJk8HaKX7fW45", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 36, "text": "There are several ways to assess the credibility of a source of information. Firstly, you can look at the author's credentials and experience in the relevant field. Secondly, you can check the source of the information, such as whether it is from a reliable website or publication. Thirdly, you can look at the evidence presented in the article and whether it is backed up by reliable sources. Finally, you can read other people's reviews or comments about the article to get a better idea of its credibility.", "answer_id": "n8cFs9KENNwZ4z3SR4iXTr", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 37, "text": "Some people enjoy the sensation of being scared because it can create a feeling of excitement, enhance their emotional state, and provide a sense of thrill and adventure. Others may avoid these experiences because they are afraid of the unknown, or because they don't enjoy the feeling of being scared. Everyone is different, and some people may be more attracted to thrilling and exciting activities while others may prefer calmer activities.", "answer_id": "GzxL9mmEK5RzKqRbqBMUVC", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 38, "text": "By observing the behavior of others in a social situation, one can gain clues as to the cultural norms and expectations of a group. For example, watching how people interact with one another, how they address each other, how they handle disagreements, and how they go about solving problems can provide insight into the cultural values of the group. Additionally, observing body language, facial expressions, and other nonverbal cues can offer clues as to the accepted norms of behavior in a particular culture.", "answer_id": "QpoHFgb9SzwuaXQQUuBUQD", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 39, "text": "It is an interesting question, and one that has been debated for quite some time. I think there are valid arguments on both sides. On the one hand, exploring space is a remarkable human endeavor and could lead to tremendous scientific discoveries and technological advances. On the other hand, there are many pressing issues that need to be addressed on Earth, such as poverty, inequality, and climate change. Each side would argue that their cause is more important, and it is ultimately up to each individual to decide which one they feel more strongly about.", "answer_id": "Fxe6MS4GpP3LMDUwzY2cPA", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 40, "text": "It is important to strike a balance between job creation and technological progress. Automation can increase efficiency and productivity, but it should not come at the expense of job security and people's livelihoods. Therefore, it is essential to create policies and initiatives that promote both job creation and technological progress. This could include investing in training and education to ensure that people have the skills necessary to compete in the modern job market, as well as incentivizing companies to invest in technologies that create jobs and stimulate economic growth.", "answer_id": "mJiQ2FGR4Xb8kmhZjharkw", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 41, "text": "On average, the human eye blinks about 20 times per minute, or about 14,400 times per day. In a lifetime, this means that the average human will blink roughly 50 million times. This may seem like a lot, but it serves an important purpose. Blinking helps to keep the eyes lubricated and prevents them from drying out. It also helps to spread tears over the surface of the eye, washing away foreign particles and keeping the eye clean. Additionally, blinking helps to reduce the risk of eye infections by helping to clear away bacteria and other foreign substances.", "answer_id": "6Kph4RHRKEZ4YUoaHuEhBv", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 42, "text": "A grain of salt contains 102.98 atoms. To calculate this, we first need to know the atomic weight of a single atom. The atomic weight of an atom is the number of protons and neutrons in the nucleus of an atom, which determines its atomic mass. The atomic weight of a single atom of salt is 58.943 g/atom. Therefore, a grain of salt contains 102.98 atoms, which is equivalent to 60.98 grams.", "answer_id": "WBwpBQwhxn5kxLDb7MschC", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 43, "text": "Approximately 2000 lightning strikes occur on Earth each day. This is because the atmospheric conditions must come together in a particular way for a lightning strike to occur. Firstly, a large amount of electric charge must accumulate in the atmosphere, typically in a storm system. Then, the air must become increasingly unstable, leading to rising air and a strong updraft. This causes an electric breakdown of the air, and then an exchange of electricity occurs from the cloud to the ground, forming a lightning bolt. As these conditions are necessary for a lightning strike to occur, about 2000 lightning strikes happen on Earth each day.", "answer_id": "kf8nahQVci2ZLaYikagB7U", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 44, "text": "It would take about 10 million balloons to lift a house like in the movie Up. The balloons would need to be filled with helium in order for the house to be lifted. Each balloon would need to be filled with about 89.1 cubic feet of helium in order to lift 500 pounds. To calculate how many balloons would be needed, simply multiply the weight of the house (264.72 lbs) by the number of cubic feet of helium needed to lift 500 pounds (89.1). Therefore, it would take 10 million balloons to lift a house like in the movie Up.", "answer_id": "Gptgryd4o2dC8V5aqRmeJJ", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 45, "text": "According to a 2017 study, over 6.3 billion text messages are sent globally in a minute. This number is expected to grow to 6.8 billion by 2021. This is due to the increasing number of smartphones, better network coverage, and the increasing popularity of texting for communication. Furthermore, the increase in smartphones and better network coverage has also led to the rise of instant messaging applications such as WhatsApp, which has further increased the amount of text messages sent per minute.", "answer_id": "RfBWW8ZhdfTuTMb454Un4o", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 46, "text": "It is estimated that over 1 billion words are spoken on Earth every day. This is due to the fact that humans are highly social creatures, and language is the primary way that we communicate with each other. In addition, language is a key component of all cultures and societies, and is constantly being used to exchange information and ideas. Furthermore, language is integral to the way we think and process information, as well as to how we connect with each other on an emotional level. Therefore, it is not surprising that so many words are spoken on a daily basis.", "answer_id": "neGgLYm47JvqN8qkw8VeoW", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 47, "text": "On average, about 100 snowflakes fall per second during a typical winter. This number can vary drastically depending on the location and severity of the storm. Snowflakes are made up of tiny ice crystals, which form in the atmosphere when the temperature is below freezing and there is moisture in the air. As air moves and mixes, the crystals come together to form snowflakes. The shape, size and number of snowflakes depend on the atmospheric conditions, such as temperature and humidity, when they form.", "answer_id": "KSePbzLwsYasR3aui4HU8h", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 48, "text": "It is estimated that all the books ever written would total about 100 billion pages. This is based on the fact that there are roughly 130 million unique titles in existence, and each book on average contains about 200 pages. This means that if all the books were printed out, the stack of paper would be about 400 miles high. Additionally, if each book was represented by a single page, the total number of pages would be approximately 100 billion. Throughout history, books have served as a way to preserve knowledge and information, as well as to share ideas and stories with others. As technology advances, books are increasingly available in digital formats, which allows for easier access and storage. Regardless of the medium, books will continue to be a valuable source of information for generations to come.", "answer_id": "SWWfLpP2gnWHLbYRZXNsW3", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 49, "text": "Since the beginning of life, the Earth has orbited the Sun a total of 4.3 billion times. This is calculated by multiplying the average length of a year (365.25 days) by the number of years since life began (approximately 4.5 billion years). Life on Earth relies on the Sun's energy reaching the Earth's atmosphere and being redistributed by the atmosphere, the land surface, and the oceans. Without this energy, life on Earth would not be possible. Therefore, the Earth's orbit around the Sun is integral to the sustainability of life on Earth.", "answer_id": "WaBwKYhs7eAG22qCGLH2j3", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 50, "text": "Since the beginning of recorded music, countless songs have been recorded. The exact number is difficult to quantify, but conservative estimates put the number of songs recorded throughout history at several hundred thousand. This includes songs from all genres, from classical music to modern pop. The invention of recording technology has allowed musicians to preserve their music and share it with the world. This has enabled generations to access and enjoy music from all eras. With the rise of digital music, the number of recordings available has grown exponentially, and new songs are being recorded all the time.", "answer_id": "MfMJeE9om7qyBbqopHouf4", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 51, "text": "If the Internet had been invented during the Renaissance period, it likely would have been used primarily by the wealthy and powerful to access knowledge and information, as there was no widespread system of public education at the time. It also would have been a much slower and more expensive process to access information, as there were no high-speed connections or digital networks available.", "answer_id": "TjWPRDM6JFpPF8xeRptCKb", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 52, "text": "If the Aztecs had successfully repelled the Spanish conquistadors, their culture and way of life would have likely remained intact. The Spanish would not have been able to colonize Mexico, and their influence on the region would have been much less. It's likely that the Aztecs would have continued to expand their empire and control the areas that are now parts of Central and South America. Additionally, the Native American populations in the area would have likely been able to maintain more of their traditional customs and beliefs, and their languages may not have been as heavily impacted by Spanish.", "answer_id": "iR2tYTsWTFENEP7Qy9RgtX", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 53, "text": "If the Black Death had not occurred in the 14th century, the population of Europe may have continued to grow unabated, leading to more rapid urbanization and economic growth. It's likely that the Renaissance would have begun sooner, and the scientific revolution may have occurred earlier as well. Without the Black Death, there may have been no need for the industrial revolution, or at least it may have occurred later and on a smaller scale. Additionally, the spread of diseases may have been slower, leading to greater population density in certain areas.", "answer_id": "AZdS8xAi3GwAmCqkNSnnwv", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 54, "text": "If Isaac Newton had focused on biology instead of physics, he might have made important discoveries in the field of medicine and biology. He may have studied the human body and its functions in greater detail, and possibly even made important breakthroughs in treating diseases. He may also have studied the behavior of different animals in the wild and made important observations about their habits and habitats.", "answer_id": "VmwifF2JD5osYKDTqv2ZRS", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 55, "text": "If the Beatles had never formed as a band, the music world would have been drastically different. The British Invasion of the 1960s might not have happened, or it could have happened in a much different way. The cultural impact of the Beatles, including their influence on other musicians, fashion and society, may never have existed.", "answer_id": "mUL5UPj3qDGaCriEjL2U3B", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 56, "text": "If Alan Turing had not cracked the Enigma code during World War II, the outcome of the war could have been very different. It's possible that the Allies would have eventually defeated the Axis powers, but it likely would have taken much longer and cost many more lives. With the Enigma code cracked, the Allies were able to gain a critical advantage over the Axis powers and ultimately win the war.", "answer_id": "dVdwUoVrAQJDuWxiodykiw", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 57, "text": "If the Suez Canal had never been constructed, it would have major implications for international trade and navigation. The Suez Canal is an important shipping route connecting the Mediterranean Sea to the Red Sea, which in turn connects to the Indian Ocean. This allows for a shorter and more direct route for ships traveling between Europe and Asia, reducing travel time significantly. Without the Suez Canal, ships would have to travel around the Cape of Good Hope at the southern tip of Africa, which is much more time consuming and costly. This would make international trade more difficult and expensive, and could hinder global economic growth.", "answer_id": "EiNn9jjfy7dga6xfCtLtF8", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 58, "text": "If the Maya civilization had not mysteriously collapsed, it is likely that their influence and power would have continued to grow. They would have likely continued to build large cities and monuments, develop their writing and astronomical systems, and expand their trade networks. Additionally, their advances in agriculture and water management may have led to increased food production and a larger population.", "answer_id": "eqG9f2R9hXVyZrZMpcqAYq", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 59, "text": "If Christopher Columbus had not discovered the Americas, the indigenous people of the Americas would have continued to live in relative isolation from the rest of the world. It is likely that European colonization and the consequences of that would have been delayed, leading to a different modern world. It is also possible that other Europeans would have made the voyage across the Atlantic and discovered the Americas instead.", "answer_id": "ex42CLEzDVC2TxAvMCtYQJ", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 60, "text": "If Vincent van Gogh had been successful during his lifetime, he may have been able to achieve the recognition and financial stability that he desired. He may have been able to travel and gain more exposure for his artwork, as well as connect with other like-minded individuals. He may have been able to create more artwork and expand his portfolio. He may also have been able to find a mentor or patron who could have helped him further his career.", "answer_id": "RfFANSwHeCm3Pqe2AJ86Dk", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 61, "text": "The program should read a text file line by line. It should then check each line for the specified word, counting up each time the word is found. Finally, it should output the number of occurrences of the word.", "answer_id": "nzxdtWmgQhRkHeVAanVogZ", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 62, "text": "The following Python function can be used to find the longest common subsequence of two input strings using dynamic programming:\ndef longest_common_subsequence(s1, s2):\n\"\"\"This function finds the longest common subsequence of two strings.\n\n Arguments:\n s1 {string} -- The first string\n s2 {string} -- The second string\n\n Returns:\n {int} -- The length of the longest common subsequence\n\n\"\"\"\n n1 = len(s1)\n n2 = len(s2)\n \n # Creating an empty matrix to store the results\n results = [[None]*(n2 + 1) for _ in range(n1 + 1)]\n \n # Initializing the first row to 0\n for i in range(n1 + 1):\n results[i][0] = 0\n \n # Filling the first row with 0\n for i in range(n1):\n results[i][0] = 0\n \n # Calculating the longest common subsequence\n for i in range(n1 + 1):\n for j in range(n2 + 1):\n if (i == 0 or j == 0):\n results[i][j] = 0\n elif (s1[i-1] == s2[j-1] and (results[i-1][j-1] < (i-j) or results[i-1][j-1] == 0)):\n results[i][j] = max(results[i-1][j-1] + 1, i+j)\n else:\n results[i][j] = 0\n \n return results[n1][n2]", "answer_id": "kRo6z5qmeiSM8Xz6BPJHLC", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 63, "text": "The following regular expression can be used to validate an email address in Python:\n^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$", "answer_id": "7kj8odEE79UBdR4pT6u6TY", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 64, "text": "Here is a program to find the nth Fibonacci number using dynamic programming:\n1. Set a variable 'n' to the nth Fibonacci number.\n2. Set two other variables 'a' and 'b' to 0 and 1 respectively.\n3. Set a loop from 0 to (n-1).\n4. In the loop, set another loop from 0 to (a+b).\n5. In the inner loop, if 'a' is even and 'b' is odd, set 'a' to a+1 and 'b' to b+1.\n6. If 'a' is odd and 'b' is even, set 'a' to a-1 and 'b' to b-1.\n7. After the inner loop, set 'n' to 'a' and break out of the outer loop.\n8. Print the value of 'n'.\nThis program uses the idea of dynamic programming to compute the nth Fibonacci number. The program first sets two auxiliary variables 'a' and 'b' to 0 and 1 respectively. Then it sets a loop from 0 to (n-1). In the loop, it sets another loop from 0 to (a+b). In the inner loop, if 'a' is even and 'b' is odd, it sets 'a' to a+1 and 'b' to b+1. If 'a' is odd and 'b' is even, it sets 'a' to a-1 and 'b' to b-1. After the inner loop, it sets 'n' to 'a' and breaks out of the outer loop. Finally, it prints the value of 'n'.", "answer_id": "UULZ4GLz9j9ot7tN7x3ySD", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 65, "text": "A binary search algorithm can be used to find a specific element in a sorted array by repeatedly splitting the array in half and examining the middle element to determine whether the target element is greater than, less than, or equal to it. If the target element is found, the algorithm immediately returns the index of the element. If it isn\u2019t, the array is split in half, with one half discarded based on the comparison. The process is repeated until the element is found or the array is completely searched.", "answer_id": "Rh8P3bxSJx4p6nCZkSZBrh", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 66, "text": "Implementing a queue using two stacks in Python is relatively simple. First, you need to create two stacks, which will act as the queue. Next, you need to define an enqueue and dequeue method, which will add and remove items from the queue. Finally, you need to create an instance of the queue class and start using it.", "answer_id": "R3aYUUtY46ht4ToU8US96x", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 67, "text": "The program can be implemented using two nested for loops. The inner loop can be used to iterate through the elements of the first array, while the outer loop can be used to iterate through the elements of the second array. If the two elements being compared are the same, the program should mark those elements with a \"found\" flag and move on. Once the inner loop completes, the program should check if the \"found\" flag is set for all elements in the second array. If it is, then the common elements have been found.", "answer_id": "MQJFbH3KoFaSMmPgmK9LPa", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 68, "text": "The value of f(2) is 1.", "answer_id": "dBWqryuzG6BHRdPgrWWpbH", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 69, "text": "x = 6.", "answer_id": "W74byrCbE7XiJWjcPTZS7x", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 70, "text": "The length of the line segment is 8 units.", "answer_id": "7WC5quGKdv8hADCxWaWZ57", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 71, "text": "Yes, I can help you write a formal email to a potential business partner proposing a joint venture. Here's a template that you can use:\nSubject Line: Proposing a Joint Venture\nDear [Name],\nI am writing to propose a joint venture between our two companies. I believe that by combining our resources, we can create a powerful and profitable partnership. Our combined experience and knowledge in the industry will help us to create something unique and special.\nI am confident that our partnership will bring mutual benefits, such as increased profits and access to new markets. Please let me know if you are interested, and we can discuss the details.\nThank you for your time and consideration. I look forward to hearing from you soon.\nSincerely,\n[Your Name]", "answer_id": "AZER7D3RKZ9F9SXHiMjdqM", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 72, "text": "Yes, I can help you write a resignation letter to your current employer. The letter should be written in a polite and professional tone and should express your appreciation for the opportunities and experiences you have gained during your employment. It should also include a clear statement of your intention to resign and the date of your last day. Here is a template you can use:\nDear [Employer],\nI am writing to inform you of my intention to resign from my position as [Position], effective [date]. I have appreciated my time here and I am grateful for the opportunities and experiences I have gained during my employment. \nI thank you for your support and understanding.\nSincerely, [Your Name]", "answer_id": "MSrdDafr77UvSHCnsPMSP3", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 73, "text": "The letter of recommendation should be structured in a formal manner. Begin by introducing the student and explaining your relationship to them. Describe their qualifications, including their academic performance and relevant skills, and mention any particular accomplishments or awards they have received. Explain why the student is a strong candidate for the program and how they will make a positive contribution. End the letter by reaffirming your recommendation and offering your contact information for further questions.", "answer_id": "hxkjUkDkXhGP78Vo74B4WE", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 74, "text": "Dear valued customers, \nWe are excited to announce the launch of our new software solution \u2013 a revolutionary product designed to transform the way you do business! Our new software is an intuitive and powerful tool that can help you streamline processes, save time and money, and increase overall productivity. \nAt [Company Name], we are committed to helping you stay ahead of the competition, and we believe that our new software solution is the perfect tool to help you achieve your goals. Our experienced team of professionals has worked hard to ensure that this product meets the highest standards of quality, and we are confident that it will make a real difference for your business. \nWe invite you to join us in this journey of transformation and discover how our new software can help you achieve your vision. Sign up now and get a free demo to learn more about this revolutionary product. \nThank you for your trust and support.", "answer_id": "hv9jpG9PxeJ9AnFdNzH3Jv", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 75, "text": "mer,\nWe apologize for the delay you experienced with your order and thank you for your patience. We have identified the cause of the issue and have taken steps to ensure it does not happen again. We value your business and hope to be able to serve you better in the future.", "answer_id": "X5BVHT5pFpWxB88SzERrEG", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 76, "text": "The video will explore the history and cultural significance of jazz music. We will begin by discussing its roots in West African cultural music traditions and its journey through the 19th and 20th centuries, focusing on the key musicians, composers, and genres that have defined the genre. We will also explore the impact of jazz on popular culture, such as in film and literature, as well as its influence on other musical styles. Finally, we will examine the legacy of jazz and its continued importance today.", "answer_id": "Af2SufKwvDBriNXnZX9UoP", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 77, "text": "I have composed a travel blog post about my recent trip to Hawaii. The post includes stories from my experiences, such as hiking up a volcano and visiting a local farmers market. I also included plenty of photos to give the reader a sense of place. I highlighted some of the unique cultural experiences, such as an evening spent listening to live Hawaiian music, as well as must-see attractions, like the beautiful beaches and lush rainforests. I ended the post with a personal reflection on the trip and why it was so special to me.", "answer_id": "HVnqVdSGRwQfqSsiYxtTTn", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 78, "text": "I recently watched the science fiction movie \"The Future is Blue\" and it was an incredible cinematic experience. The movie follows the story of a scientist who travels back in time to save the world from a deadly virus. The plot was interesting and engaging, with plenty of twists and turns to keep you guessing. The characters were also well developed and likable, making it easy to get invested in their story. The special effects were stunning and added a lot of visual oomph to the movie. All in all, I highly recommend this movie - it's an exciting and thought-provoking ride from start to finish.", "answer_id": "8hcmmjzyMyhFJ8ZwqJJfvw", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 79, "text": "The podcast episode would begin by introducing the topic of how streaming platforms have changed the music industry. This would include discussing the impact of streaming services like Spotify and Apple Music on music consumption, the effect on musicians' ability to make money, and how streaming has changed the way people find and discover music. The episode would then explore the effects of streaming on the music industry from multiple angles, such as the impact on musicians and songwriters, how streaming has changed the sound of popular music, and the implications for the music industry's future. Finally, the episode would conclude by discussing how streaming has both benefited and hurt the music industry, and what the future holds for music consumption.", "answer_id": "CU6gGfJqQmwZNxEG2n2Uda", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"question_id": 80, "text": "The symphony concert was a wonderful experience. The orchestra played beautifully, with a rich and soulful sound that filled the hall. The audience was captivated, and you could feel the energy of the music in the air. The performance was a perfect balance of classic and modern pieces, and the conductor's passion was contagious. The atmosphere was festive, and the encore performance was a perfect way to end the evening. All in all, it was a brilliant concert and a delight for the ears.", "answer_id": "5SWbCoLV6NRrq5RtWQK4jp", "model_id": "alpaca-13b:v1", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
diff --git a/model/fastchat/eval/table/answer/answer_bard.jsonl b/model/fastchat/eval/table/answer/answer_bard.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d5233419535a2821277d98ce151c0900a27bec7c
--- /dev/null
+++ b/model/fastchat/eval/table/answer/answer_bard.jsonl
@@ -0,0 +1,80 @@
+{"answer_id": "3oW4JY265ZPJGTYi2CgRYF", "model_id": "bard:20230327", "question_id": 1, "text": "Here are some tips on how to improve your time management skills:\n\n* **Set goals and prioritize your tasks.** What do you need to accomplish today, this week, and this month? Once you know what needs to be done, you can start to prioritize your tasks. Decide which tasks are most important and need to be done first.\n* **Break down large tasks into smaller, more manageable ones.** This will make them seem less daunting and help you to get started.\n* **Set deadlines for yourself and stick to them.** This will help you to stay on track and avoid procrastination.\n* **Take breaks when you need them.** Don't try to work for hours on end without taking a break. Getting up and moving around, or taking a few minutes to relax and clear your head, will help you to stay focused and productive.\n* **Don't be afraid to say no.** If you have too much on your plate, it's okay to say no to new commitments. It's better to focus on the tasks that you already have than to take on too much and end up feeling overwhelmed.\n* **Automate tasks whenever possible.** This will free up your time so that you can focus on more important things.\n* **Get organized.** This will help you to find what you need when you need it and avoid wasting time looking for things.\n* **Take care of yourself.** Make sure to get enough sleep, eat healthy foods, and exercise regularly. This will help you to stay focused and productive.\n\nTime management is a skill that takes time and practice to develop. But by following these tips, you can improve your time management skills and get more done in less time.", "metadata": {}}
+{"answer_id": "CtDuDqypbL958TQork7v54", "model_id": "bard:20230327", "question_id": 2, "text": "There are many effective ways to deal with stress. Some of the most common include:\n\n* **Exercise.** Exercise releases endorphins, which have mood-boosting effects. Aim for at least 30 minutes of moderate-intensity exercise most days of the week.\n* **Eat a healthy diet.** A healthy diet can help improve your overall mood and well-being. Make sure to eat plenty of fruits, vegetables, and whole grains.\n* **Get enough sleep.** Sleep is essential for reducing stress and improving your mood. Aim for 7-8 hours of sleep each night.\n* **Talk to someone.** Talking to a friend, family member, therapist, or counselor can help you process your thoughts and feelings.\n* **Practice relaxation techniques.** Relaxation techniques such as yoga, meditation, and deep breathing can help you calm down and manage stress.\n* **Take a break.** If you're feeling overwhelmed, take a few minutes to step away from the situation and relax. Go for a walk, listen to music, or do something else that you enjoy.\n\nIt's important to find what works best for you and to make time for these activities on a regular basis. Stress is a normal part of life, but it's important to manage it in a healthy way.", "metadata": {}}
+{"answer_id": "jqb5c6eBeyVTFo7SsqxqHA", "model_id": "bard:20230327", "question_id": 3, "text": "Here are some of the main differences between Python and JavaScript programming languages:\n\n* **Python is a general-purpose, high-level programming language, while JavaScript is a text-based, object-oriented programming language.** Python is designed to be easy to read and learn, while JavaScript is more complex.\n* **Python is dynamically typed, while JavaScript is statically typed.** This means that the type of a variable in Python can change at runtime, while the type of a variable in JavaScript is fixed at compile time.\n* **Python has a large standard library, while JavaScript does not.** The standard library contains a wide range of modules that can be used for common tasks, such as file I/O, networking, and data manipulation.\n* **Python is an interpreted language, while JavaScript is a compiled language.** This means that Python code is executed line-by-line by an interpreter, while JavaScript code is converted into machine code before it is executed.\n* **Python is used for a wide range of applications, including web development, data science, and artificial intelligence.** JavaScript is primarily used for web development, but it can also be used for other applications, such as desktop development and mobile development.\n\nUltimately, the best language for you to learn depends on your specific needs and goals. If you are new to programming, Python is a good choice because it is easy to learn and use. If you want to develop web applications, JavaScript is a good choice because it is the most popular language for front-end development.", "metadata": {}}
+{"answer_id": "P5rC8W6gTuD4aY6HR5HQj9", "model_id": "bard:20230327", "question_id": 4, "text": "Here are some tips on how to increase your productivity while working from home:\n\n* **Set up a dedicated workspace.** This will help you stay focused and avoid distractions. If you can, set up your workspace in a quiet area of your home.\n* **Start your day early.** This will give you time to get organized and tackle your most important tasks.\n* **Take breaks.** It's important to take breaks throughout the day to stretch, walk around, or get a snack. This will help you stay refreshed and focused.\n* **Avoid distractions.** This means turning off your phone, closing your email, and anything else that might take your attention away from your work.\n* **Take care of yourself.** Make sure to eat healthy, get enough sleep, and exercise regularly. This will help you stay healthy and productive.\n* **Reward yourself.** When you accomplish a task, give yourself a small reward, such as taking a break or watching a TV show. This will help you stay motivated.\n* **Communicate with your team.** Make sure to stay in touch with your team members and let them know what you're working on. This will help you stay on track and avoid duplication of effort.\n* **Set goals.** Having goals will help you stay focused and motivated. Make sure your goals are specific, measurable, achievable, relevant, and time-bound.\n* **Track your progress.** It's important to track your progress so you can see how you're doing. This will help you stay motivated and make adjustments as needed.\n* **Be patient.** Working from home can be a challenge, but it's important to be patient with yourself. It takes time to adjust to a new way of working.\n\nFollowing these tips can help you increase your productivity while working from home.", "metadata": {}}
+{"answer_id": "3uaqwfbwxtyDdqB8UVN3jM", "model_id": "bard:20230327", "question_id": 5, "text": "Quantum computing is a field of computer science that deals with the development of quantum algorithms and quantum computers. Quantum algorithms are algorithms that can be executed by quantum computers, and quantum computers are computers that can execute quantum algorithms.\n\nQuantum computing is based on the principles of quantum mechanics, which is the study of the behavior of matter and energy at the atomic and subatomic level. Quantum mechanics is a very different way of thinking about the world than classical mechanics, which is the study of the behavior of matter and energy at the macroscopic level.\n\nOne of the key concepts in quantum mechanics is superposition. Superposition means that a quantum system can exist in multiple states at the same time. For example, an electron can be in a state of superposition where it is both a wave and a particle.\n\nAnother key concept in quantum mechanics is entanglement. Entanglement means that two quantum systems can be linked together in such a way that they share the same fate, even if they are separated by a large distance.\n\nQuantum computing is still in its early stages of development, but it has the potential to revolutionize many fields, including cryptography, chemistry, and materials science.\n\nHere are some of the basics of quantum computing:\n\n* Quantum bits, or qubits, are the basic unit of information in quantum computing. Qubits can be in a superposition of states, meaning they can be both 0 and 1 at the same time.\n* Quantum gates are the basic operations that can be performed on qubits. Quantum gates can be used to create superpositions, entangle qubits, and measure qubits.\n* Quantum algorithms are algorithms that can be executed by quantum computers. Quantum algorithms can be used to solve problems that are difficult or impossible to solve with classical computers.\n* Quantum computers are computers that can execute quantum algorithms. Quantum computers are still in their early stages of development, but they have the potential to revolutionize many fields, including cryptography, chemistry, and materials science.", "metadata": {}}
+{"answer_id": "HTRJ5q97DrgzoSofxkgedz", "model_id": "bard:20230327", "question_id": 6, "text": "The main difference between plant-based and animal-based protein sources is that plant-based protein sources do not contain all nine essential amino acids, while most meat proteins do. (Most plant-based protein sources tend to lack at least 1 of the essential amino acids, although there are some that are considered complete.)\n\nAnimal proteins are considered complete proteins, meaning they contain all nine essential amino acids that the body cannot produce on its own. Plant proteins, on the other hand, are often incomplete proteins, meaning they do not contain all nine essential amino acids. However, there are some plant-based protein sources that are considered complete, such as quinoa and buckwheat.\n\nAnimal proteins also tend to be higher in saturated fat and cholesterol than plant proteins. Plant proteins are lower in saturated fat and cholesterol, and they also contain fiber, which can help to lower cholesterol levels.\n\nThere are several benefits to eating plant-based protein sources. Plant-based protein sources are often lower in calories and fat than animal protein sources. They are also a good source of fiber, which can help to keep you feeling full and satisfied. Plant-based protein sources can also help to lower cholesterol levels and reduce the risk of heart disease.\n\nThere are also some benefits to eating animal-based protein sources. Animal protein sources are a good source of B vitamins, iron, and zinc. They can also help to build muscle mass and strength. However, it is important to choose lean cuts of meat and poultry to reduce saturated fat and cholesterol intake.\n\nUltimately, the best type of protein for you depends on your individual needs and preferences. If you are looking for a diet that is low in calories and fat, plant-based protein sources are a good option. If you are looking for a diet that is high in protein and B vitamins, animal protein sources are a good option.", "metadata": {}}
+{"answer_id": "EhhyKNc3m8c9tnxm8uPfmt", "model_id": "bard:20230327", "question_id": 7, "text": "Here are some tips on how to develop your critical thinking skills:\n\n* **Be open to new ideas and information.** Don't be afraid to question everything, even your own beliefs.\n* **Be curious and ask questions.** The more you learn, the better equipped you'll be to think critically about the information you're presented with.\n* **Be skeptical of information that seems too good to be true.** If something seems too good to be true, it probably is.\n* **Be aware of your own biases.** We all have biases, but it's important to be aware of them so that they don't cloud your judgment.\n* **Be able to identify and evaluate arguments.** When you're presented with an argument, be able to identify the premises and the conclusion, and evaluate whether the premises support the conclusion.\n* **Be able to think creatively and solve problems.** Critical thinking is not just about analyzing information, but also about being able to come up with new ideas and solutions to problems.\n* **Practice regularly.** The more you practice critical thinking, the better you'll become at it.\n\nThere are many ways to develop your critical thinking skills. You can read books and articles on the subject, take classes, or even just spend time thinking about the world around you. The important thing is to be willing to learn and grow.", "metadata": {}}
+{"answer_id": "JQXPknRQd24NUPKZxi6RNf", "model_id": "bard:20230327", "question_id": 8, "text": "The education sector faces many challenges today, including:\n\n* **Funding:** Schools are often underfunded, which can lead to a lack of resources and support for students and teachers.\n* **Class size:** Classrooms are often overcrowded, which can make it difficult for teachers to give individual students the attention they need.\n* **Teacher shortages:** There is a shortage of qualified teachers in many areas, which can lead to schools hiring teachers who are not adequately prepared.\n* **Student achievement:** The achievement gap between students from different socioeconomic backgrounds persists, which means that some students are not getting the education they need to succeed.\n* **School safety:** School shootings and other acts of violence have become more common, which has made schools a less safe place for students and staff.\n* **Technology:** Technology is changing the way we learn, but it also presents new challenges for schools and educators.\n* **Charter schools and voucher programs:** Charter schools and voucher programs have become more popular in recent years, which has led to competition between public and private schools.\n\nThese are just some of the challenges that the education sector faces today. It is important to address these challenges in order to ensure that all students have access to a quality education.", "metadata": {}}
+{"answer_id": "Lb3C2xQKdLCqFj4v3rmaof", "model_id": "bard:20230327", "question_id": 9, "text": "There are many factors that influence consumer behavior, but some of the primary ones include:\n\n* **Personal factors:** These factors include a person's age, gender, income, occupation, and lifestyle. For example, a young person with a high income may be more likely to spend money on luxury items, while an older person with a lower income may be more likely to spend money on necessities.\n* **Psychological factors:** These factors include a person's motivation, perception, learning, attitudes, and beliefs. For example, a person who is motivated by a desire for self-improvement may be more likely to buy a product that they believe will help them achieve their goals.\n* **Social factors:** These factors include a person's family, friends, and social class. For example, a person who is influenced by their family may be more likely to buy a product that they believe their family will approve of.\n* **Cultural factors:** These factors include a person's culture, subculture, and social class. For example, a person who is from a culture that values luxury may be more likely to buy a product that is considered to be a luxury item.\n* **Economic factors:** These factors include a person's income, savings, and debt. For example, a person who has a high income may be more likely to buy a product that is expensive.\n\nIt is important to note that these factors are not always the same for everyone. A person's individual circumstances and preferences will also play a role in their consumer behavior.", "metadata": {}}
+{"answer_id": "DhuZJtL3jhnrsTBvDct9oV", "model_id": "bard:20230327", "question_id": 10, "text": "There are many effective strategies for conflict resolution in the workplace. Some of the most common include:\n\n* **Communication:** The first step in resolving conflict is to communicate effectively with all parties involved. This means listening to each other's perspectives, sharing your own feelings and needs, and being open to compromise.\n* **Active listening:** Active listening is a key skill in conflict resolution. It involves paying attention to what the other person is saying, both verbally and nonverbally, and trying to understand their point of view.\n* **Problem-solving:** Once you have a better understanding of the conflict, you can start to work on solutions. This may involve brainstorming ideas, negotiating compromises, or finding ways to meet everyone's needs.\n* **Time-out:** If things start to get heated, it may be helpful to take a time-out. This gives everyone a chance to cool down and collect their thoughts.\n* **Mediation:** If you are unable to resolve the conflict on your own, you may want to consider mediation. This is a process in which a neutral third party helps you and the other person to come to a resolution.\n* **Arbitration:** If mediation is unsuccessful, you may want to consider arbitration. This is a process in which a neutral third party makes a decision about how to resolve the conflict.\n\nIt is important to remember that there is no one-size-fits-all approach to conflict resolution. The best strategy will vary depending on the specific situation. However, by following these general tips, you can increase the chances of resolving conflict peacefully and effectively.", "metadata": {}}
+{"answer_id": "mDSj4BKim2eANUnEjW7xBm", "model_id": "bard:20230327", "question_id": 11, "text": "The use of single-use plastic bottles has a number of negative environmental and health implications.\n\nEnvironmentally, single-use plastic bottles contribute to the problem of plastic pollution. Every year, millions of plastic bottles end up in landfills, incinerators, or the ocean. These bottles can take hundreds of years to decompose, and they can leach harmful chemicals into the environment.\n\nHealth-wise, the use of single-use plastic bottles can increase the risk of exposure to harmful chemicals. These chemicals can leach into the water from the bottle, or they can be released into the air when the bottle is heated. Exposure to these chemicals can cause a number of health problems, including cancer, reproductive problems, and neurological problems.\n\nUsing a reusable bottle instead of a single-use plastic bottle can help to reduce the environmental and health impacts of plastic pollution. Reusable bottles are made from materials that are less harmful to the environment, and they can be used over and over again. This helps to reduce the amount of plastic waste that ends up in landfills, incinerators, or the ocean.\n\nReusable bottles are also better for human health. They do not leach harmful chemicals into the water, and they do not release chemicals into the air when they are heated. This helps to reduce the risk of exposure to harmful chemicals.\n\nOverall, using a reusable bottle instead of a single-use plastic bottle is a better choice for both the environment and human health.", "metadata": {}}
+{"answer_id": "MnkceSK7WwyXqAhbuKVYX7", "model_id": "bard:20230327", "question_id": 12, "text": "There are many factors to consider when designing an inclusive and accessible public transportation system. Some of the most important factors include:\n\n* **Accessibility**. The system must be accessible to people with disabilities, including those who use wheelchairs, mobility scooters, or other assistive devices. This means that all aspects of the system, including the vehicles, the stations, and the signage, must be designed to be accessible.\n* **Affordability**. The system must be affordable for all users, including low-income and marginalized communities. This means that the fares must be reasonable and that there must be a variety of payment options available.\n* **Convenience**. The system must be convenient to use, with frequent service and easy connections between different modes of transportation. This means that the system must be well-integrated with other transportation options, such as car sharing and bike lanes.\n* **Safety**. The system must be safe for all users, including women and children. This means that the vehicles must be well-maintained and that there must be adequate security measures in place.\n* **Sustainability**. The system must be sustainable, both environmentally and economically. This means that the system must use renewable energy sources and that it must be affordable to operate.\n\nThese are just some of the factors that must be considered when designing an inclusive and accessible public transportation system. By taking these factors into account, we can create a system that is accessible to everyone and that meets the needs of all communities.", "metadata": {}}
+{"answer_id": "EsyaBVpTN8BGbTSiFMnZUF", "model_id": "bard:20230327", "question_id": 13, "text": "Fiscal and monetary policies can be used by governments to combat economic recessions in a number of ways.\n\n* **Fiscal policy** refers to the use of government spending and taxation to influence the economy. During a recession, governments can use fiscal policy to increase demand and stimulate the economy. This can be done by increasing government spending on infrastructure, education, and healthcare. Governments can also reduce taxes to give consumers more money to spend.\n* **Monetary policy** refers to the use of interest rates and the money supply to influence the economy. During a recession, central banks can use monetary policy to lower interest rates and make it cheaper for businesses to borrow money. This can encourage businesses to invest and expand, which can create jobs and boost the economy.\n\nBoth fiscal and monetary policies can be effective in combatting economic recessions. However, the effectiveness of these policies depends on a number of factors, including the severity of the recession, the underlying causes of the recession, and the effectiveness of the policies themselves.\n\nIn recent years, there has been a debate about the best way to use fiscal and monetary policies to combat economic recessions. Some economists argue that fiscal policy should be the primary tool used to combat recessions, while others argue that monetary policy should be the primary tool. There is no single answer to this question, as the best policy approach will vary depending on the specific circumstances.\n\nHowever, both fiscal and monetary policies can be effective in combatting economic recessions, and governments should use both tools to help stimulate the economy and create jobs.", "metadata": {}}
+{"answer_id": "dX8M752A6tzqLg9KhwgG5p", "model_id": "bard:20230327", "question_id": 14, "text": "Language and cultural barriers can affect the way people communicate and form relationships in multicultural societies in a number of ways.\n\n* **Language barriers** can make it difficult for people to understand each other's words and meanings. This can lead to misunderstandings and frustration, and can make it difficult to build trust and rapport.\n* **Cultural barriers** can make it difficult for people to understand each other's values, beliefs, and customs. This can lead to misunderstandings and conflict, and can make it difficult to build strong relationships.\n* **Language and cultural barriers** can also make it difficult for people to feel comfortable and included in multicultural societies. This can lead to feelings of isolation and alienation, and can make it difficult to participate fully in the community.\n\nThere are a number of things that can be done to overcome language and cultural barriers in multicultural societies. These include:\n\n* **Learning about other cultures**. This can help people to understand and appreciate different cultures, and can make it easier to build relationships with people from other cultures.\n* **Learning about other languages**. This can help people to communicate effectively with people from other cultures, and can help to build trust and rapport.\n* **Building bridges**. This can help to bring people from different cultures together, and can help to create a more inclusive and supportive environment.\n* **Celebrating diversity**. This can help to promote understanding and appreciation for different cultures, and can help to create a more vibrant and exciting community.", "metadata": {}}
+{"answer_id": "dzwhq5XbaEBVpargyczz9B", "model_id": "bard:20230327", "question_id": 15, "text": "Artificial intelligence (AI) can be used to improve the quality and efficiency of healthcare delivery in a number of ways. For example, AI can be used to:\n\n* **Diagnose diseases:** AI-powered algorithms can be trained to analyze large amounts of medical data, such as patient records and medical images, to identify patterns that may indicate disease. This can help doctors to diagnose diseases more accurately and quickly, which can lead to better patient outcomes.\n* **Treat diseases:** AI can also be used to develop new treatments for diseases. For example, AI-powered algorithms can be used to screen large libraries of compounds for potential drug candidates. This can help to speed up the drug discovery process and bring new drugs to market more quickly.\n* **Monitor patients:** AI can be used to monitor patients for signs of disease progression or complications. For example, AI-powered algorithms can be used to analyze data from wearable devices, such as smartwatches and fitness trackers, to identify changes in heart rate, sleep patterns, and other vital signs. This can help doctors to identify problems early on and intervene before they become serious.\n* **Provide administrative support:** AI can also be used to provide administrative support to healthcare providers. For example, AI-powered chatbots can be used to answer patient questions, schedule appointments, and process insurance claims. This can free up healthcare providers to spend more time with patients and focus on providing care.\n\nOverall, AI has the potential to revolutionize healthcare delivery by improving the quality and efficiency of care. By automating tasks and providing support to healthcare providers, AI can help to free up time and resources, which can lead to better patient outcomes.", "metadata": {}}
+{"answer_id": "8zqxUtHxgtoHBkbf2bkqNW", "model_id": "bard:20230327", "question_id": 16, "text": "CRISPR-Cas9 is a gene editing technology that can be used to make precise changes to DNA. It works by cutting DNA at a specific location and then allowing the cell's natural DNA repair mechanisms to take over. This can be used to delete, add, or replace genes.\n\nCRISPR-Cas9 has a wide range of potential applications, including:\n\n* Treating genetic diseases: CRISPR-Cas9 could be used to correct genetic mutations that cause diseases such as cystic fibrosis, sickle cell anemia, and Huntington's disease.\n* Improving crop yields: CRISPR-Cas9 could be used to make crops more resistant to pests and diseases, or to increase the nutritional content of crops.\n* Creating new biofuels: CRISPR-Cas9 could be used to engineer bacteria that can produce biofuels from waste materials.\n\nHowever, CRISPR-Cas9 also raises ethical concerns, such as:\n\n* The potential to create designer babies: CRISPR-Cas9 could be used to edit the genes of human embryos, potentially creating \"designer babies\" with desired traits.\n* The potential to create genetically modified organisms (GMOs): CRISPR-Cas9 could be used to edit the genes of plants and animals, creating GMOs that some people believe could pose risks to human health or the environment.\n* The potential to create new weapons: CRISPR-Cas9 could be used to create new biological weapons that could be used to harm or kill people.\n\nIt is important to weigh the potential benefits and risks of CRISPR-Cas9 before using it.", "metadata": {}}
+{"answer_id": "WJc37t4n5PqmKKS3V4eMG2", "model_id": "bard:20230327", "question_id": 17, "text": "Vaccines work by teaching your body's immune system how to recognize and fight off a disease. When you get a vaccine, your body produces antibodies against the disease. These antibodies will stay in your blood and help protect you if you are ever exposed to the disease again.\n\nVaccinations are very effective at preventing diseases. For example, the measles vaccine is 97% effective at preventing measles, and the mumps vaccine is 88% effective at preventing mumps.\n\nVaccinations also help protect communities from infectious diseases. When a large number of people in a community are vaccinated, it makes it harder for the disease to spread. This is because even if a few people in the community are infected, the disease will not be able to spread to as many people because most people are immune. This is called herd immunity.\n\nHerd immunity is important because it can help protect people who cannot be vaccinated, such as young children, pregnant women, and people with certain medical conditions. These people are at increased risk of getting sick if they are exposed to a disease.\n\nHerd immunity can also help protect people who are vaccinated but whose immune system does not respond well to the vaccine. These people may still get sick if they are exposed to the disease, but they are less likely to get seriously ill.\n\nVaccination is one of the most important ways to protect yourself and your community from infectious diseases. If you are not sure whether you are up-to-date on your vaccinations, talk to your doctor.", "metadata": {}}
+{"answer_id": "CvVLf8FgoHywJy8j8JJ4qL", "model_id": "bard:20230327", "question_id": 18, "text": "Social media platforms have a significant influence on the way people consume and share news. They provide a platform for people to share news stories with their friends and followers, and they also allow users to curate their own news feeds. This can lead to people being exposed to a wider range of news stories than they would otherwise be, but it can also make it more difficult for them to distinguish between legitimate news sources and fake news.\n\nSocial media platforms also use algorithms to determine what news stories are shown to users. These algorithms are designed to keep users engaged on the platform, and they often prioritize news stories that are likely to get a lot of attention, such as those that are sensational or controversial. This can lead to people being exposed to a lot of news stories that are not necessarily accurate or reliable.\n\nThe spread of misinformation on social media is a serious problem. Fake news stories can be designed to look like they are from legitimate news sources, and they can spread quickly and easily on social media. This can have a significant impact on public opinion, and it can also make it more difficult for people to trust the information they see online.\n\nThere are a number of things that can be done to combat the spread of misinformation on social media. One is to educate people about how to identify fake news stories. Another is to make it easier for people to report fake news stories. And finally, social media platforms can do more to remove fake news stories from their platforms.\n\nIt is important to remember that social media is not the only way that people get news. Many people still get their news from traditional sources, such as newspapers and television. However, social media is becoming an increasingly important part of how people consume news, and it is important to understand how it is changing the way people consume and share news.", "metadata": {}}
+{"answer_id": "P5rytR6vTJjxgWxRoxT3vX", "model_id": "bard:20230327", "question_id": 19, "text": "Cultural, social, and economic factors influence people's food choices in many ways. Here are a few examples:\n\n* **Culture** plays a role in food choices by shaping what people consider to be acceptable or desirable to eat. For example, some cultures have strong taboos against eating certain foods, such as pork or beef.\n* **Social factors** such as family, friends, and peers can also influence food choices. People are more likely to eat foods that are popular among their social group.\n* **Economic factors** such as the cost of food and the availability of healthy food options can also affect food choices. People who are on a tight budget may choose to eat less expensive, less healthy foods.\n\nThis knowledge can be used to promote healthier diets by making healthy foods more accessible and affordable, and by educating people about the importance of eating a healthy diet. For example, governments can provide subsidies for healthy foods, or they can build more grocery stores in underserved communities. Schools can also teach students about nutrition and healthy eating habits.\n\nIn addition, individuals can make healthier food choices by being more mindful of the cultural, social, and economic factors that influence their decisions. For example, people can try to eat more foods that are culturally appropriate, and they can avoid foods that are not healthy simply because they are popular. People can also try to eat more healthy foods that are affordable, and they can make healthy food choices even when they are on a tight budget.\n\nBy making healthy food choices, people can improve their overall health and well-being.", "metadata": {}}
+{"answer_id": "5biCd7QRZP6rquaz8eC9Vm", "model_id": "bard:20230327", "question_id": 20, "text": "Natural selection is the process by which organisms with favorable traits survive and reproduce, passing on those traits to their offspring. This process leads to the evolution of species over time.\n\nNatural selection is based on the following principles:\n\n* Variation: There is variation within a population of organisms. This variation can be caused by genetic mutation, environmental factors, or both.\n* Differential reproduction: Not all organisms survive and reproduce equally. Some organisms are more likely to survive and reproduce than others.\n* Heredity: The traits that an organism inherits from its parents are passed down to its offspring.\n\nNatural selection can lead to the evolution of new species. For example, if a population of giraffes is living in an environment with a lot of tall trees, the giraffes with longer necks will be more likely to survive and reproduce. This is because they will be able to reach the leaves on the taller trees. Over time, the giraffes with longer necks will become more common in the population. This is an example of natural selection leading to the evolution of a new species, in this case, giraffes with longer necks.\n\nNatural selection can also lead to the adaptation of species to their environment. For example, if a population of fish is living in an environment with a lot of predators, the fish with darker colors will be more likely to survive and reproduce. This is because they will be less visible to the predators. Over time, the fish with darker colors will become more common in the population. This is an example of natural selection leading to the adaptation of a species to its environment.\n\nNatural selection is a powerful mechanism that can lead to the evolution and adaptation of species. It is a key part of the process of life on Earth.", "metadata": {}}
+{"answer_id": "363RwB6kr8nV6qFNdjXZnS", "model_id": "bard:20230327", "question_id": 21, "text": "I would introduce myself as follows:\n\n\"I am Sir [your name], a knight in the service of [your lord's name]. I have come to this banquet to pay my respects to your majesty and to enjoy your hospitality.\"\n\nI would then bow to the king or queen and take my seat at the table. If I was asked to speak, I would give a brief account of my deeds and accomplishments.\n\nI would be careful to avoid anything that could be seen as boastful or arrogant. I would also be mindful of the customs and etiquette of the time, and make sure to behave in a way that would be considered appropriate.", "metadata": {}}
+{"answer_id": "gDnYxMu5Dd52xhMqQAJaZP", "model_id": "bard:20230327", "question_id": 22, "text": "Here is what I would say to my crew to motivate them to search for hidden treasure:\n\n\"Mates, I know you're all tired and hungry, but we have a chance to find something that will make us all rich beyond our wildest dreams. There's a treasure buried out there, and I know we can find it. We just need to keep our heads up and keep searching. I believe in you, and I know you can do this. So let's get to it!\"\n\nI would also try to appeal to their sense of adventure and excitement. I would tell them about the stories I've heard of other pirates who have found great treasure, and how they have used it to live a life of luxury and adventure. I would tell them that this could be their chance to live that life too.\n\nFinally, I would try to motivate them by offering them a share of the treasure. I would tell them that if we find the treasure, they will all be rich beyond their wildest dreams. I would tell them that they will be able to buy anything they want, and that they will never have to worry about money again.\n\nI believe that if I can motivate my crew with these words, they will be more likely to search for the hidden treasure.", "metadata": {}}
+{"answer_id": "kCV5RSrnmmTyv3HA5oU38P", "model_id": "bard:20230327", "question_id": 23, "text": "If I were a Shakespearean character, I would declare my love for someone in a soliloquy by speaking from the heart and expressing my true feelings. I would use poetic language and imagery to describe my love, and I would convey the depth of my emotion. I would also be sure to include a declaration of my love, so that my beloved would know how I feel.\n\nHere is an example of how I might declare my love in a soliloquy:\n\nMy dearest, I love you with all my heart.\nYou are the sun that lights my days,\nAnd the moon that guides me through the night.\nYou are the stars that sparkle in my eyes,\nAnd the rain that washes away my tears.\nYou are the wind that fills my sails,\nAnd the earth that grounds me.\nYou are my everything,\nAnd I will love you forever.\n\nI hope this helps!", "metadata": {}}
+{"answer_id": "CTGLYNAWrWeuCYZAR9bD2r", "model_id": "bard:20230327", "question_id": 24, "text": "I would tell the child that I was once a regular person, just like them. I went to school, played games, and had friends. But one day, I got into an accident. I was hit by a car and almost died. But instead of dying, I got superpowers! I can fly, I can lift heavy things, and I can even shoot lasers out of my eyes! I use my powers to help people and make the world a better place.\n\nI would also tell the child that I was inspired by other superheroes, like Superman and Batman. They showed me that even though bad things happen in the world, there are always people who are willing to help others. I wanted to be one of those people, so I decided to use my powers to make a difference.\n\nI would hope that the child would be inspired by my story and would want to use their own powers to help others. I would tell them that even though they don't have superpowers, they can still make a difference in the world by being kind to others, helping out when they can, and standing up for what they believe in.", "metadata": {}}
+{"answer_id": "Eh5qCENdftYf9JKrBspYna", "model_id": "bard:20230327", "question_id": 25, "text": "As a time traveler from the year 3000, I would tell people about the following technological advancements:\n\n* **Dyson spheres:** Dyson spheres are theoretical megastructures that would surround a star and harness its energy. They are still in the theoretical stage, but scientists believe that they could be built in the next few centuries.\n* **Interstellar travel:** Interstellar travel is the ability to travel between stars and planets. It is currently impossible with our current technology, but scientists are working on developing new technologies that could make it possible in the future.\n* **Artificial intelligence:** Artificial intelligence (AI) is a branch of computer science that deals with the creation of intelligent agents, which are systems that can reason, learn, and act autonomously. AI has made great progress in recent years, and it is likely to continue to advance in the future.\n* **Robotics:** Robotics is the field of engineering that deals with the design, construction, operation, and application of robots. Robots are machines that can be programmed to perform a variety of tasks. They are already used in a variety of industries, and they are likely to become even more common in the future.\n* **Gene editing:** Gene editing is the ability to modify the genes of an organism. This technology has the potential to cure genetic diseases, improve crop yields, and even create new species of plants and animals.\n* **Virtual reality:** Virtual reality (VR) is a computer-generated simulation that can be similar to or completely different from the real world. VR is already being used for a variety of purposes, such as training, entertainment, and education. It is likely to become even more common in the future.\n\nThese are just a few of the technological advancements that I would tell people about. I am sure that there will be many other advancements that I cannot even imagine.", "metadata": {}}
+{"answer_id": "M6hCe8Z4JDpt75HZ5PsSWr", "model_id": "bard:20230327", "question_id": 26, "text": "Here is a possible description of the winning play in the final seconds of a championship game:\n\n\"With only seconds remaining in the game, the score is tied. The ball is on the opposing team's 40-yard line, and they have one last chance to score. The quarterback takes the snap and drops back to pass. He looks downfield and sees his wide receiver open. He throws the ball, and the wide receiver catches it in stride. He runs into the end zone for a touchdown! The crowd goes wild as the team celebrates their victory.\"\n\nThis is just one example of how a winning play in the final seconds of a championship game could be described. There are many other ways to describe the play, and the specific details of the play would depend on the specific game.", "metadata": {}}
+{"answer_id": "QCDsmbALfut5758pyiKvc5", "model_id": "bard:20230327", "question_id": 27, "text": "My signature dish is a pan-seared filet mignon wrapped in prosciutto and topped with a mushroom duxelles. It is served with a side of roasted potatoes and asparagus.\n\nThe filet mignon is cooked to perfection, and the prosciutto adds a salty, smoky flavor. The mushroom duxelles is a rich and flavorful sauce that pairs perfectly with the beef. The roasted potatoes and asparagus are a delicious and healthy accompaniment.\n\nThis dish is a true labor of love. I take great care in selecting the ingredients and preparing the dish. I want my guests to experience the best of what I have to offer.\n\nI am confident that you will enjoy this dish as much as I do. It is a perfect combination of flavors and textures that will leave you feeling satisfied.\n\nI hope you will consider my dish for your next competition. I believe it is a dish that would stand out from the competition and impress your judges.", "metadata": {}}
+{"answer_id": "NWUbhwZQCuXsuQimrjQRza", "model_id": "bard:20230327", "question_id": 28, "text": "I stand at the top of the world, my breath coming in ragged gasps. The air is thin and cold, but I don't care. I'm on top of Mount Everest, the highest point on Earth. I've made it.\n\nI look around at the vast expanse of snow and ice. The sun is just starting to rise, and the sky is a brilliant blue. The mountains around me are like giant sleeping giants.\n\nI feel a sense of awe and wonder. I've never seen anything like this before. It's so beautiful, so peaceful.\n\nI take a deep breath and savor the moment. I've worked so hard for this, and it's all worth it.\n\nI know that I'll never forget this feeling. It's the feeling of accomplishment, of reaching a goal, of overcoming adversity. It's the feeling of being alive.\n\nI turn and start my descent. I know that I'll never be able to stay at the top of the world forever. But I'll always remember this moment.", "metadata": {}}
+{"answer_id": "VYwSjZrSLW9ZSvqryyjEaB", "model_id": "bard:20230327", "question_id": 29, "text": "As a space colonist on Mars, my daily life is a constant struggle for survival. The challenges I face are many and varied, but the most pressing are the lack of oxygen, the extreme cold, and the radiation.\n\nI wake up each morning in a pressurized habitat, where I eat a quick breakfast and then get ready for the day's work. My first task is to check the air filters, as they are constantly being clogged by the dust and sand. Once that is done, I start my work on the crops. We have been able to grow some plants in the greenhouse, but they are not enough to sustain us. We are constantly on the lookout for new ways to grow food, as we know that our survival depends on it.\n\nAfter a long day of work, I return to the habitat for dinner and a few hours of relaxation. I am always tired, as the work is physically and mentally demanding. But I know that I have to keep going, as I am part of something bigger than myself. I am part of the first generation of humans to live on another planet, and that is an honor.\n\nThe challenges I face are many, but I am determined to overcome them. I know that the future of humanity depends on it.\n\nHere are some of the challenges I face as a space colonist on Mars:\n\n* **Lack of oxygen.** The atmosphere on Mars is very thin, and it contains mostly carbon dioxide. This means that I have to wear a pressurized suit whenever I go outside, and I have to bring my own oxygen with me.\n* **Extreme cold.** The average temperature on Mars is -63 degrees Celsius. This means that I have to wear multiple layers of clothing, and I have to keep my habitat heated.\n* **Radiation.** The surface of Mars is constantly bombarded by radiation from the sun and from space. This radiation can cause cancer, so I have to wear special shielding to protect myself.\n* **Dust storms.** The surface of Mars is very dusty, and there are frequent dust storms. These dust storms can block out the sun and make it difficult to breathe.\n* **Isolation.** Mars is very far away from Earth. This means that I am cut off from my family and friends, and I have to rely on my fellow colonists for support.\n\nDespite these challenges, I am determined to make a life for myself on Mars. I know that it will not be easy, but I am confident that I can overcome the challenges I face.", "metadata": {}}
+{"answer_id": "FA7PXuUbEVGKHaWpxaimy8", "model_id": "bard:20230327", "question_id": 30, "text": "I am a survivor in a post-apocalyptic world. I have been living on my own for the past few years, and I have learned to be resourceful and self-sufficient. I have a small garden where I grow my own food, and I have a few chickens that I raise for eggs. I also scavenge for food and supplies when I can.\n\nI have made a few allies in this world. There are a few other survivors who live in the area, and we help each other out when we can. We share food and supplies, and we look out for each other's safety. We are all trying to survive in this new world, and we know that we are stronger together.\n\nOne of my allies is a young woman named Sarah. She is a skilled hunter and gatherer, and she has taught me a lot about living off the land. She is also a kind and compassionate person, and she has always been there for me when I needed her.\n\nAnother one of my allies is a man named John. He is a former soldier, and he is a skilled fighter and tactician. He has taught me a lot about self-defense, and he has also helped me to develop my leadership skills. He is a loyal and trustworthy friend, and I am grateful to have him in my life.\n\nI am grateful for my allies, and I know that I am lucky to have them in my life. We are all trying to survive in this new world, and we know that we are stronger together.", "metadata": {}}
+{"answer_id": "j5EV5cZNsn9DcF6WsvXRzS", "model_id": "bard:20230327", "question_id": 31, "text": "Here are some ways to determine if a restaurant is popular among locals or mainly attracts tourists:\n\n* **Check the restaurant's website or social media pages.** If the restaurant has a lot of reviews and comments from locals, that's a good sign that it's popular among locals.\n* **Look for the restaurant's name in local publications or websites.** If the restaurant is mentioned in local newspapers or magazines, that's another sign that it's popular among locals.\n* **Ask locals for recommendations.** If you're looking for a good place to eat, ask locals for their recommendations. They're likely to know the best places to eat in their area.\n* **Look for the restaurant's location.** If the restaurant is located in a tourist area, that's a good sign that it mainly attracts tourists. However, if the restaurant is located in a residential area, that's a good sign that it's popular among locals.\n* **Look for the restaurant's atmosphere.** If the restaurant has a casual atmosphere, that's a good sign that it's popular among locals. However, if the restaurant has a formal atmosphere, that's a good sign that it mainly attracts tourists.\n\nKnowing whether a restaurant is popular among locals or mainly attracts tourists can be useful for a number of reasons. For example, if you're looking for a good place to eat that's authentic to the local culture, you'll want to find a restaurant that's popular among locals. On the other hand, if you're looking for a good place to eat that's convenient to your hotel, you'll want to find a restaurant that's popular among tourists.", "metadata": {}}
+{"answer_id": "2eAYCYmwTkPa3ejQDv8LyB", "model_id": "bard:20230327", "question_id": 32, "text": "There are a few subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed. Here are a few examples:\n\n* Using vague or general terms without providing any specific examples.\n* Asking questions that are already answered in the conversation.\n* Making statements that don't seem to be relevant to the topic.\n* Not making eye contact or appearing uncomfortable.\n* Fidgeting or making other nervous gestures.\n* Trying to change the subject or end the conversation quickly.\n\nIf you notice someone exhibiting these behaviors, it's possible that they are pretending to understand something they don't. If you're not sure, it's always best to err on the side of caution and ask them to clarify what they mean.", "metadata": {}}
+{"answer_id": "d562WYnhsvgJ8J6Ubitmvw", "model_id": "bard:20230327", "question_id": 33, "text": "There are a few reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app:\n\n* **They may not have a smartphone or GPS device.** Not everyone has a smartphone or GPS device, especially if they are traveling to a developing country or rural area.\n* **They may not have a reliable internet connection.** Even if you have a smartphone or GPS device, you may not have a reliable internet connection to use them. This can be a problem in areas with poor cell phone reception or where internet access is expensive.\n* **They may not want to use their battery life.** Using a smartphone or GPS device can drain your battery life quickly, especially if you are using them for navigation. If you are trying to conserve battery life, you may prefer to use a paper map or ask for directions.\n* **They may prefer the tactile experience of using a paper map.** Some people simply prefer the tactile experience of using a paper map. They may find it more satisfying to fold and unfold a map, and they may find it easier to visualize their route when they can see it laid out on paper.\n* **They may prefer to interact with other people.** When you ask for directions, you are interacting with another person. This can be a good way to meet new people and learn about the local culture.\n\nIn addition, there are some specific situations where using a paper map or asking for directions may be the best option:\n\n* **If you are hiking or camping.** If you are hiking or camping in an area with no cell phone reception, you will need to use a paper map or ask for directions from other hikers or campers.\n* **If you are driving in a remote area.** If you are driving in a remote area with no cell phone reception, you will need to use a paper map or ask for directions from other drivers.\n* **If you are traveling to a country with a different language.** If you are traveling to a country with a different language, you may not be able to use a smartphone or GPS device to get directions. In this case, you will need to ask for directions from someone who speaks your language.", "metadata": {}}
+{"answer_id": "hPMvV6zL2C4qTP4mRmhJwG", "model_id": "bard:20230327", "question_id": 34, "text": "Here are some tips on how to determine if a person is genuinely interested in a conversation or simply being polite:\n\n* **Pay attention to their body language.** If they are leaning in towards you, making eye contact, and nodding their head, they are likely interested in what you have to say. If they are fidgeting, checking their phone, or looking around the room, they may not be as engaged.\n* **Listen to their questions.** If they are asking questions about you and your interests, they are likely interested in getting to know you. If they are only asking questions about the topic of the conversation, they may not be as engaged.\n* **Notice how they respond to your answers.** If they are attentively listening to your answers and asking follow-up questions, they are likely interested in what you have to say. If they are not responding to your answers or changing the topic of the conversation, they may not be as interested.\n* **Trust your gut.** If you feel like someone is not being genuine, they probably are not. If you feel like someone is genuinely interested in you and the conversation, they probably are.\n\nIt is important to remember that everyone is different and there is no one-size-fits-all answer to this question. However, by paying attention to these tips, you can get a good sense of whether someone is genuinely interested in a conversation or simply being polite.", "metadata": {}}
+{"answer_id": "npWNeKceGyqCYaRpY4w54g", "model_id": "bard:20230327", "question_id": 35, "text": "There are many reasons why someone might prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher. Here are a few:\n\n* **Supporting the local economy.** When you shop at a small business, more of your money stays in your community. This helps to support local jobs, schools, and infrastructure.\n* **Getting unique products.** Small businesses are often more likely to carry unique or one-of-a-kind products that you won't find at a chain store.\n* **Getting better service.** Small businesses are often more customer-oriented than chain stores. Owners and employees are more likely to take the time to get to know you and your needs.\n* **Feeling good about supporting a local family or community.** When you shop at a small business, you're supporting the dreams of a local family or community. This can give you a sense of pride and satisfaction.\n\nOf course, there are also some downsides to shopping at small businesses. The prices may be higher, and the selection may be smaller. But for many people, the benefits outweigh the costs.", "metadata": {}}
+{"answer_id": "WVuaK9m8Sedcws27tNu7Ev", "model_id": "bard:20230327", "question_id": 36, "text": "Here are some tips on how to assess the credibility of a source of information without relying solely on the reputation of the author or publisher:\n\n* **Check the source's credentials.** Does the author have any relevant experience or education? Is the publisher a reputable organization?\n* **Look for bias.** Does the source seem to be trying to push an agenda? Is it more likely to present one side of an issue than the other?\n* **Check the sources of the information.** Does the source cite any other sources? Are those sources credible?\n* **Look for errors.** Is there any evidence that the source has made any mistakes?\n* **Read the source critically.** Ask yourself if the information makes sense. Does it seem consistent with what you know about the topic?\n\nIt is important to remember that even the most reputable sources can make mistakes. It is always a good idea to do your own research and to corroborate information from multiple sources.", "metadata": {}}
+{"answer_id": "HLtTf83Y5QRP4TxX6nw5TC", "model_id": "bard:20230327", "question_id": 37, "text": "There are a few reasons why people might enjoy being scared. One reason is that it can release endorphins, which are hormones that have mood-boosting effects. Endorphins can make us feel happy and euphoric, and they can also help to reduce stress and pain.\n\nAnother reason people might enjoy being scared is that it can provide a sense of excitement and adrenaline. When we are scared, our bodies release adrenaline, which can make us feel more alert and energized. This can be a pleasurable experience for some people.\n\nFinally, some people might enjoy being scared because it allows them to experience a sense of control over their fear. When we are scared, we often feel like we are at the mercy of our emotions. However, if we are able to face our fears and overcome them, it can give us a sense of empowerment and accomplishment.\n\nOn the other hand, some people avoid scary experiences because they find them to be unpleasant. They might be afraid of feeling scared, or they might worry that they will not be able to handle the fear. Others might simply not find the experience of being scared to be enjoyable.\n\nUltimately, whether or not someone enjoys being scared is a personal preference. There is no right or wrong answer, and people should do what makes them happy.", "metadata": {}}
+{"answer_id": "Fmdtexq6QQNuoqZkZfDURY", "model_id": "bard:20230327", "question_id": 38, "text": "Observing the behavior of other people in a social situation can provide clues about cultural norms and expectations by:\n\n* **Identifying patterns of behavior.** If you see that people are all doing something in a certain way, it can be a sign that there is an unspoken rule about how to behave in that situation. For example, if you see that everyone in a restaurant is using their napkins to wipe their mouths, it can be a sign that there is an expectation that people will use their napkins to eat.\n* **Paying attention to how people react to violations of cultural norms.** If you see someone violate a cultural norm, take note of how other people react. Do they seem surprised? Angry? Upset? This can give you clues about how important the norm is to people in that culture. For example, if you see someone in a restaurant talking loudly on their phone, and other people around them seem annoyed, it can be a sign that people in that culture value quietness in restaurants.\n* **Talking to people about their culture.** If you are curious about cultural norms and expectations, you can always talk to people who are from that culture. They can tell you about the rules that they have learned and how they are expected to behave in different situations. For example, if you are visiting a country that has a different language than your own, you could talk to a native speaker of that language about how to greet people in their culture.\n\nObserving the behavior of other people in a social situation can be a helpful way to learn about cultural norms and expectations. By paying attention to patterns of behavior, how people react to violations of cultural norms, and talking to people about their culture, you can gain a deeper understanding of how people in different cultures behave.", "metadata": {}}
+{"answer_id": "WxnC69jTMkyJvcqvMCgCwY", "model_id": "bard:20230327", "question_id": 39, "text": "There is no easy answer to this question, as it is a complex and multifaceted issue. There are many factors to consider, such as the potential benefits of space exploration, the potential costs, and the ethical implications of exploring space.\n\nSome of the potential benefits of space exploration include:\n\n* Expanding our knowledge of the universe and our place in it\n* Developing new technologies that can be used to solve Earth's problems\n* Creating new industries and jobs\n* Inspiring people and motivating them to pursue their dreams\n\nSome of the potential costs of space exploration include:\n\n* The financial costs of developing and launching spacecraft\n* The environmental costs of space debris\n* The risks to human life\n* The potential for conflict between nations as they compete for resources in space\n\nThe ethical implications of exploring space are also complex. Some people believe that it is our moral duty to explore space, as it is the next frontier for humanity. Others believe that we should focus on solving Earth's problems first, before we embark on a journey into the unknown.\n\nUltimately, the decision of whether or not to explore space is a personal one. There is no right or wrong answer, and each person must weigh the potential benefits and costs before making a decision.", "metadata": {}}
+{"answer_id": "npZdTFPRqZfoqzt5YurYEL", "model_id": "bard:20230327", "question_id": 40, "text": "In a world where automation is becoming increasingly prevalent, it is important to prioritize both job creation and technological progress. Technological progress can lead to job creation by creating new industries and demand for new products and services. However, it can also lead to job destruction by automating tasks that were previously done by humans. It is important to find ways to mitigate the negative effects of automation on employment while also harnessing its potential to create new jobs.\n\nOne way to mitigate the negative effects of automation on employment is to invest in education and training so that workers can develop the skills they need to succeed in the new economy. Another way is to provide financial assistance to workers who are displaced by automation.\n\nIt is also important to create new industries and demand for new products and services that can be created by technological progress. This can be done by encouraging entrepreneurship and innovation. It is also important to invest in research and development so that new technologies can be developed that create new jobs.\n\nBy prioritizing both job creation and technological progress, we can ensure that automation has a positive impact on the economy and on society as a whole.", "metadata": {}}
+{"answer_id": "iy9aa5sqFeNA2uPQZLpxEz", "model_id": "bard:20230327", "question_id": 41, "text": "The average human blinks 15-20 times per minute, 1,200 times per hour, 28,800 times per day, 10,512,000 times a year, and 518,017,359 times in their entire lifetime.\n\nWe blink to keep our eyes moist and clean, and to protect them from foreign objects. Blinking also helps us to focus our vision.\n\nThe number of times we blink per minute varies depending on a number of factors, including our age, our activity level, and the environment we are in. For example, we tend to blink more often when we are tired or when we are exposed to bright lights.\n\nThe number of times we blink per day also varies depending on our age. For example, infants blink more often than adults, and older adults blink less often than younger adults.\n\nThe number of times we blink per year is a constant number, regardless of our age or activity level.\n\nThe number of times we blink per lifetime is a large number, but it is not an infinite number. If we lived for 100 years, we would blink an average of 518,017,359 times.", "metadata": {}}
+{"answer_id": "XAALo4GKWE3QNb7wbkWVNk", "model_id": "bard:20230327", "question_id": 42, "text": "There are approximately 10^18 atoms in a grain of salt. This number can be calculated by using the following steps:\n\n1. The atomic mass of sodium chloride (NaCl) is 58.44 g/mol.\n2. One mole contains Avogadro's number of atoms, which is 6.022 x 10^23 atoms.\n3. Therefore, the mass of one grain of salt is 58.44 x 10^-3 g.\n4. Dividing the mass of one grain of salt by the atomic mass of sodium chloride, we get the number of moles of sodium chloride in one grain of salt.\n5. Dividing the number of moles of sodium chloride by Avogadro's number, we get the number of atoms in one grain of salt.\n\nThis number is very large, and it can be difficult to comprehend. However, it is important to remember that atoms are very small, and that a grain of salt is also very small. The average grain of salt is about 0.05 mm in diameter, which is about the size of a grain of sand. Therefore, the number of atoms in a grain of salt is still relatively small compared to the size of the grain.", "metadata": {}}
+{"answer_id": "XRWjLnJNyGNMfktTvWpbRV", "model_id": "bard:20230327", "question_id": 43, "text": "There are about 8.6 million lightning strikes on Earth each day. This is based on the average number of lightning strikes worldwide every second, which is 100. \n\nLightning strikes occur when there is an imbalance between the Earth's surface and the atmosphere. The Earth's surface is negatively charged, while the atmosphere is positively charged. When these two charges come into contact, they create an electrical discharge, which is lightning.\n\nLightning strikes are most common in areas with a lot of thunderstorms, such as the tropics. They can also occur in other areas, but they are less common.\n\nLightning strikes can be dangerous, and they can cause a lot of damage. They can start fires, injure people, and even kill people. It is important to be aware of the dangers of lightning strikes and to take precautions to stay safe.", "metadata": {}}
+{"answer_id": "CHeyn9eR3u5eFq99UJ3pbB", "model_id": "bard:20230327", "question_id": 44, "text": "In the movie \"Up,\" the house is a typical American house that weighs about 10,000-25,000 kg. Helium balloons create about 10 g of lift each, so you would need at least 1,000,000 balloons to lift the house. In the film, we see an enormous collection of balloons above the house, perhaps 10 times the volume of the house. This would be about 10 million balloons, which is enough to lift the house.\n\nHowever, there are a few things to consider. First, the balloons would need to be very strong to withstand the weight of the house. Second, the house would need to be very light to be lifted by the balloons. Third, the balloons would need to be very buoyant to lift the house against the force of gravity.\n\nEven if all of these factors were met, it is still unlikely that a house could be lifted by balloons in the real world. The cost of the balloons, the time it would take to fill them, and the risk of them bursting would make it an impractical solution.", "metadata": {}}
+{"answer_id": "kfoNRLCHFucfWcxFBPoJgP", "model_id": "bard:20230327", "question_id": 45, "text": "According to Domo, 15,220,700 texts are sent every minute of every day worldwide, not including app-to-app messaging. This means that in a single minute, more than 15 million text messages are sent around the globe.\n\nThere are a few reasons why this number is so high. First, texting is a convenient and efficient way to communicate. It allows people to send quick messages to multiple people at once, and it doesn't require the sender or receiver to be available at the same time. Second, texting is a relatively inexpensive way to communicate. Most carriers offer unlimited texting plans, so people can send as many messages as they want without having to worry about running out of minutes or data. Finally, texting is a popular way to communicate with friends and family. Many people prefer to text rather than call or email, because it is a more casual and informal way to communicate.\n\nThe number of text messages sent globally is expected to continue to grow in the coming years. This is due to a number of factors, including the increasing popularity of smartphones, the growing availability of high-speed internet, and the continued development of new text messaging features.", "metadata": {}}
+{"answer_id": "A4sDEGXpWaNVA39uRsDNjB", "model_id": "bard:20230327", "question_id": 46, "text": "The number of words spoken daily on Earth is a difficult question to answer, as there are many factors that can affect the number of words spoken. However, there are some estimates that can be made.\n\nOne estimate is that the average person speaks about 7,000 words per day. This is based on a study that found that the average person speaks about 182,500 words per week. If we assume that the average person speaks for 5 days per week, then this would mean that they speak about 7,000 words per day.\n\nHowever, this is just an estimate, and the actual number of words spoken per day can vary greatly depending on the individual. Some people may speak more than 10,000 words per day, while others may speak less than 5,000 words per day.\n\nThere are a number of factors that can affect the number of words spoken per day. These include the person's occupation, their culture, and their personality. For example, people who work in sales or customer service may speak more words per day than people who work in jobs that are more solitary. People who live in cultures that are more talkative may also speak more words per day than people who live in cultures that are more reserved. And people who are more extroverted may also speak more words per day than people who are more introverted.\n\nSo, while the exact number of words spoken daily on Earth is unknown, it is estimated that the average person speaks about 7,000 words per day. This number can vary greatly depending on the individual, but it is a good starting point for estimating the number of words spoken on our planet.", "metadata": {}}
+{"answer_id": "hUAc6BCs5NuY7tS62PPXbT", "model_id": "bard:20230327", "question_id": 47, "text": "There are an estimated 1 septillion snowflakes that fall each year. A septillion is a 1 with 24 zeros after it. To put that into perspective, there are only about 8 billion people on Earth. So, there are more snowflakes that fall in a single year than there are people on the entire planet!\n\nThe number of snowflakes that fall each year depends on a number of factors, including the amount of snowfall, the temperature, and the wind. In general, areas that receive a lot of snowfall will have more snowflakes falling each year. And, of course, areas that are colder will also have more snowflakes falling.\n\nSnowflakes are formed when water droplets in the atmosphere freeze. The water droplets need something to cling to in order to freeze, such as a dust particle or a piece of ice. Once the water droplets freeze, they start to grow and form snowflakes.\n\nThe shape of a snowflake depends on the temperature and the amount of water vapor in the atmosphere. If the temperature is very cold, the snowflakes will be very small and have a simple shape. If the temperature is warmer, the snowflakes will be larger and more complex.\n\nSnowflakes are a beautiful and fascinating part of winter. They are a reminder of the beauty of nature and the power of the cold.", "metadata": {}}
+{"answer_id": "TMtjmb5tDizQ9RETRxpt2s", "model_id": "bard:20230327", "question_id": 48, "text": "The number of pages in all the books ever written is an impossible number to know with certainty. There are many factors that would affect the total, including the number of books that have been written, the average length of a book, and the number of books that have been lost or destroyed.\n\nHowever, we can make some educated guesses based on available data. For example, Google Books has indexed over 129 million books, and the average length of a book is 300 pages. This would mean that the total number of pages in all the books ever written is over 38 trillion.\n\nHowever, this number is likely an underestimate. First, it does not take into account self-published books, which are often longer than traditionally published books. Second, it does not take into account books that have been lost or destroyed.\n\nA more accurate estimate of the total number of pages in all the books ever written would be somewhere in the range of 100 trillion to 1 quadrillion pages. This is a truly staggering number, and it is difficult to imagine how many books it would take to fill such a library.\n\nBut even this number is just an estimate. The true number of pages in all the books ever written is likely much higher, and we will never know for sure.", "metadata": {}}
+{"answer_id": "AwQMPhhaJ32ByA3VjKF5Ph", "model_id": "bard:20230327", "question_id": 49, "text": "The Earth has orbited the Sun 4.543 billion times since the beginning of life. This number is based on the age of the Earth, which is 4.543 billion years old, and the time it takes for the Earth to orbit the Sun, which is 365.2422 days.\n\nThe first life on Earth is thought to have arisen about 3.8 billion years ago. This means that the Earth has orbited the Sun about 1 billion times since the first life arose.\n\nThe Earth's orbit around the Sun is elliptical, which means that it is not a perfect circle. The Earth's orbit is closest to the Sun in January and farthest from the Sun in July. This difference in distance causes the Earth's seasons.\n\nThe Earth's orbit is also tilted relative to the Sun's equator. This tilt causes the Earth's seasons to change.\n\nThe Earth's orbit around the Sun is a complex process that has been shaped by the forces of gravity, inertia, and tidal forces. The Earth's orbit is constantly changing, but it has been remarkably stable for billions of years. This stability is essential for life on Earth.", "metadata": {}}
+{"answer_id": "UCqxXfMAnqJKbApsjrPFcq", "model_id": "bard:20230327", "question_id": 50, "text": "There is no definitive answer to the question of how many songs have been recorded throughout history. However, there are a few ways to estimate this number.\n\nOne way is to look at the number of songs that have been registered with the United States Copyright Office. As of 2023, the US Copyright Office has registered over 97 million songs. However, this is likely an underestimate, as many songs are not registered with the Copyright Office.\n\nAnother way to estimate the number of songs that have been recorded is to look at the number of songs that are available for streaming on services like Spotify and Apple Music. As of 2023, Spotify has over 82 million songs in its catalog, and Apple Music has over 60 million songs. This suggests that there are at least 142 million songs that have been recorded and released to the public.\n\nHowever, this is still likely an underestimate, as many songs are not available for streaming on these services. For example, many songs are only available on physical media, such as vinyl records or CDs. Additionally, many songs are not released to the public, and are only available to be recorded by professional musicians.\n\nUltimately, the number of songs that have been recorded throughout history is likely in the billions. This is a staggering number, and it is a testament to the creativity and ingenuity of human beings.", "metadata": {}}
+{"answer_id": "YMRg5Xi9BBvvqqny2tqJZ3", "model_id": "bard:20230327", "question_id": 51, "text": "If the Internet had been invented during the Renaissance period, it would have had a profound impact on the world. The Renaissance was a time of great intellectual and cultural growth, and the Internet would have accelerated this growth. It would have allowed people to share ideas more easily, and to collaborate on projects from all over the world. It would have also helped to spread education and knowledge, and to promote understanding between different cultures.\n\nThe Internet would have also had a significant impact on the arts. It would have allowed artists to share their work more easily, and to connect with other artists from all over the world. It would have also helped to promote the arts, and to make them more accessible to the public.\n\nOverall, the Internet would have had a positive impact on the world if it had been invented during the Renaissance period. It would have accelerated the growth of knowledge and understanding, and it would have promoted the arts and culture.", "metadata": {}}
+{"answer_id": "fhr7iMPb9SVJ663mXKBuct", "model_id": "bard:20230327", "question_id": 52, "text": "If the Aztecs had successfully repelled the Spanish conquistadors, the course of history in the Americas would have been very different. The Aztecs were a powerful empire with a strong military, and their defeat by the Spanish was a major blow to the indigenous peoples of the Americas. If the Aztecs had won, they might have been able to resist the further expansion of European colonialism, and the Americas might have developed in a more independent and indigenous-centered way.\n\nHowever, it is important to note that the Aztecs were not immune to disease, and they were also facing internal political and economic problems. Even if they had defeated the Spanish, it is possible that their empire would have collapsed in the years to come.\n\nUltimately, the question of what would have happened if the Aztecs had successfully repelled the Spanish is a matter of speculation. However, it is clear that their defeat had a profound impact on the course of history in the Americas.", "metadata": {}}
+{"answer_id": "4rrmyZw9zhyJGqdrcazFvt", "model_id": "bard:20230327", "question_id": 53, "text": "The Black Death was a devastating event that had a profound impact on the course of European history. It is impossible to say for certain what would have happened if the Black Death had not occurred, but it is likely that Europe would have been a very different place.\n\nOne of the most significant effects of the Black Death was the decline of the population. Millions of people died from the plague, and many more were left weakened or orphaned. This led to a labor shortage, which had a major impact on the economy.\n\nThe Black Death also had a profound impact on society. The deaths of so many people led to a sense of despair and hopelessness. Many people turned to religion for comfort, and there was a rise in superstition and fear.\n\nThe Black Death also led to changes in the way people lived. People began to avoid crowded areas, and they took steps to prevent the spread of the plague, such as burning the bodies of the dead and avoiding contact with people who were sick.\n\nIt is impossible to say for certain what would have happened if the Black Death had not occurred, but it is clear that it had a profound impact on the course of European history.", "metadata": {}}
+{"answer_id": "Za3mY9xwxpZdPmGW48wtzu", "model_id": "bard:20230327", "question_id": 54, "text": "If Isaac Newton had focused on biology instead of physics, he might have made some important discoveries in the field of medicine. He could have studied the mechanics of the human body and how it moves, or he could have investigated the causes of disease. He might have even developed new treatments or cures for illnesses.\n\nHowever, it is also possible that Newton would not have made any significant discoveries in biology. He might have been too focused on physics to devote himself to another field. Additionally, biology is a much more complex field than physics, and Newton might not have had the same level of success in it.\n\nUltimately, it is impossible to say for sure what would have happened if Newton had focused on biology instead of physics. However, it is clear that he was a brilliant scientist with a lot to offer both fields.", "metadata": {}}
+{"answer_id": "cbAaJS9ULjR4XYSHATujSG", "model_id": "bard:20230327", "question_id": 55, "text": "It is impossible to say for certain what the world would be like without the Beatles, but it is clear that they had a profound impact on popular culture. The band's music and style were hugely influential, and their success helped to popularize rock and roll music around the world.\n\nIf the Beatles had never formed, it is possible that another band would have taken their place and become just as popular. However, it is also possible that the world would have been a very different place. The Beatles' music was often seen as a symbol of hope and optimism, and their message of peace and love resonated with many people. Without them, the world might be a darker and more cynical place.\n\nIt is also worth noting that the Beatles were not just a band. They were also cultural icons who helped to shape the way we view the world. Their fashion, their haircuts, and their language all became part of the fabric of popular culture. Without them, the world might be a more boring place.\n\nUltimately, it is impossible to say for certain what the world would be like without the Beatles. However, it is clear that they had a profound impact on popular culture, and their absence would be felt by many people.", "metadata": {}}
+{"answer_id": "ZEgb9fvopGo7HF5wPeoeHs", "model_id": "bard:20230327", "question_id": 56, "text": "If Alan Turing had not cracked the Enigma code during World War II, the Allies would have been at a significant disadvantage. The Enigma code was used by the German military to communicate securely, and it was very difficult to break. Turing's work was essential to the Allied victory, and it is likely that the war would have lasted much longer and resulted in more casualties if he had not been able to break the code.\n\nTuring's work at Bletchley Park was also important because it helped the Allies to understand the German military's plans and intentions. This information helped the Allies to make better decisions about where to deploy their troops and how to use their resources.\n\nOverall, Turing's work on the Enigma code was essential to the Allied victory in World War II. If he had not been able to break the code, the war would have lasted much longer and resulted in more casualties.", "metadata": {}}
+{"answer_id": "igMXoEiszFM65ZS2KUTvtm", "model_id": "bard:20230327", "question_id": 57, "text": "If the Suez Canal had never been constructed, it would have had a significant impact on the global economy. The canal is a major shipping route, and its absence would have forced ships to take a much longer and more expensive route around Africa. This would have made goods more expensive and slower to arrive.\n\nThe Suez Canal is also a major source of income for Egypt. The government collects tolls from ships that use the canal, and this money is used to fund important projects such as schools, hospitals, and infrastructure. If the canal had never been built, Egypt would have lost a significant source of income.\n\nThe Suez Canal is also a major symbol of Egyptian pride. The canal was built by Egyptian workers, and it is a source of national pride. If the canal had never been built, it would have been a major blow to Egyptian morale.\n\nOverall, the absence of the Suez Canal would have had a significant impact on the global economy and on Egypt. The canal is a major shipping route, a source of income for Egypt, and a symbol of Egyptian pride.", "metadata": {}}
+{"answer_id": "Up4h8RpgVVafBtUj4tiGPZ", "model_id": "bard:20230327", "question_id": 58, "text": "If the Maya civilization had never mysteriously collapsed, it is likely that they would have continued to thrive and develop. They were a highly advanced civilization with a sophisticated understanding of mathematics, astronomy, and engineering. They also had a rich culture and a strong sense of community.\n\nIf the Maya had not collapsed, they would have likely continued to expand their territory and influence. They might have even become a global power. Their advanced technology and culture would have had a profound impact on the world.\n\nHowever, it is also possible that the Maya would have eventually collapsed even if they had not experienced the sudden and mysterious decline that they did. They were a very complex civilization with a lot of interconnected parts. If one part of the system collapsed, it could have had a cascading effect on the rest of the civilization.\n\nUltimately, it is impossible to say for sure what would have happened if the Maya civilization had never collapsed. However, it is clear that they were a very advanced and influential civilization. If they had not collapsed, they would have had a profound impact on the world.", "metadata": {}}
+{"answer_id": "CMg3ypKyykGUzQsDmX7i35", "model_id": "bard:20230327", "question_id": 59, "text": "If Christopher Columbus had not discovered the Americas, it is likely that other European explorers would have eventually found the continent. However, the timing of Columbus's voyage was important, as it coincided with the beginning of the Age of Exploration. Columbus's discovery helped to spur further exploration and colonization of the Americas, which had a profound impact on the history of the world.\n\nIf Columbus had not discovered the Americas, it is possible that the continent would have remained largely unknown to Europeans for centuries. This would have delayed the development of trade and communication between Europe and the Americas, and it would have also delayed the spread of European diseases to the Americas.\n\nIt is also possible that the Americas would have been settled by other peoples, such as the Chinese or the Japanese. These civilizations had the technology and the resources to reach the Americas, and they were also interested in exploring new lands.\n\nUltimately, it is impossible to say for sure what would have happened if Christopher Columbus had not discovered the Americas. However, it is clear that his discovery had a profound impact on the history of the world.", "metadata": {}}
+{"answer_id": "WhLiJXznzRBkzxDdnc5ndX", "model_id": "bard:20230327", "question_id": 60, "text": "If Vincent van Gogh had been a successful artist during his lifetime, he would have likely sold more paintings and had a greater impact on the art world. He might have been able to support himself financially and pursue his art career without having to rely on his brother Theo for financial support. He might have also been able to travel more and experience more of the world, which would have influenced his art. \r\n\r\nHowever, it is also possible that Van Gogh's tragic story is part of the appeal of his art. His paintings are often seen as a window into his tortured soul and his passionate love of nature. If he had been successful during his lifetime, he might not have been able to create such powerful and moving works of art. \r\n\r\nUltimately, it is impossible to say for sure what would have happened if Vincent van Gogh had been a successful artist during his lifetime. However, it is clear that his tragic story has not diminished his impact on the art world. His paintings continue to be admired and loved by people all over the world.", "metadata": {}}
+{"answer_id": "Vzmhr5RMndkR866p4pUef6", "model_id": "bard:20230327", "question_id": 61, "text": "It looks like you want my help with coding, but I'm not trained to do that, yet. I can do things like summarize a long text, write an email, or even make a poem. Is there something else you would like to try?", "metadata": {}}
+{"answer_id": "jPrfhv9oaZGrkXxDUw5Vbt", "model_id": "bard:20230327", "question_id": 62, "text": "Sure, here is a Python function to find the longest common subsequence of two input strings using dynamic programming:\n\n```python\ndef lcs(s1, s2):\n m = len(s1)\n n = len(s2)\n dp = [[0] * (n + 1) for i in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n\n return dp[m][n]\n```\n\nThis function uses dynamic programming to solve the problem. The idea is to first fill a table with the lengths of the longest common subsequences of all possible subsequences of the two strings. Then, we can find the longest common subsequence of the two strings by looking at the last element of the table.\n\nThe time complexity of this algorithm is O(nm), where m and n are the lengths of the two strings. The space complexity is O(mn).", "metadata": {}}
+{"answer_id": "KdZNrf6udTfCxGGcQxRbXk", "model_id": "bard:20230327", "question_id": 63, "text": "Sure, here is a regular expression in Python to validate an email address:\n\n```python\nimport re\n\nregex = '^[a-zA-Z0-9]+[\\._]?[ a-zA-Z0-9]+[@]\\w+[. ]\\w{2,3}$'\n\ndef check(email):\n if re.search(regex, email):\n return True\n else:\n return False\n```\n\nThis regular expression matches the following format:\n\n* A username, which can contain letters, numbers, and underscores.\n* The @ symbol\n* A domain name, which can contain letters, numbers, and dots.\n* The top-level domain, which must be 2-3 characters long.\n\nThis regular expression does not validate the following:\n\n* The existence of the email server.\n* The validity of the domain name.\n* The availability of the username.\n\nIf you want to validate all of these things, you will need to use a different method.", "metadata": {}}
+{"answer_id": "AFzqAfbYVL25DBqFA2cv4G", "model_id": "bard:20230327", "question_id": 64, "text": "Here is a Python program to find the nth Fibonacci number using dynamic programming:\n\n```python\ndef fib(n):\n\n # Create a table of size n to store the Fibonacci numbers\n dp = [0] * n\n\n # Fill the first two elements of the table\n dp[0] = 0\n dp[1] = 1\n\n # Fill the remaining elements of the table\n for i in range(2, n):\n dp[i] = dp[i-1] + dp[i-2]\n\n # Return the nth Fibonacci number\n return dp[n-1]\n\n# Driver code\nn = int(input(\"Enter the nth Fibonacci number: \"))\nprint(\"The nth Fibonacci number is\", fib(n))\n```\n\nThis program uses dynamic programming to solve the Fibonacci sequence problem. Dynamic programming is a technique that can be used to solve problems that have overlapping subproblems. In this case, the subproblems are the Fibonacci numbers. The program stores the results of previously solved subproblems in a table, and then uses that table to calculate the nth Fibonacci number.", "metadata": {}}
+{"answer_id": "Lt56JzWfXBxbvxrLf4fn3t", "model_id": "bard:20230327", "question_id": 65, "text": "Sure, here is a binary search algorithm to find a specific element in a sorted array:\n\n```python\ndef binary_search(array, low, high, x):\n if high >= low:\n mid = (high + low) // 2\n if array[mid] == x:\n return mid\n elif array[mid] < x:\n low = mid + 1\n else:\n high = mid - 1\n else:\n return -1\n```\n\nThis algorithm works by repeatedly dividing the search interval in half until the target value is found. The algorithm starts by comparing the target value to the middle element of the array. If they are not equal, the half in which the target cannot lie is eliminated and the search continues on the remaining half, again taking the middle element to compare to the target value, and repeating this until the target value is found. If the search ends with the remaining half being empty, the target is not in the array.\n\nThe time complexity of binary search is O(log n), where n is the number of elements in the array. This means that the algorithm takes logarithmic time to find the target value, even if the array is very large.", "metadata": {}}
+{"answer_id": "bmaShWUyEqPE6kbHD6FjLk", "model_id": "bard:20230327", "question_id": 66, "text": "Here is an implementation of a queue data structure using two stacks in Python:\n\n```python\nclass Queue:\n\n def __init__(self):\n self.in_stack = []\n self.out_stack = []\n\n def enqueue(self, element):\n self.in_stack.append(element)\n\n def dequeue(self):\n if len(self.out_stack) == 0:\n while len(self.in_stack) > 0:\n self.out_stack.append(self.in_stack.pop())\n return self.out_stack.pop()\n\n def __len__(self):\n return len(self.in_stack) + len(self.out_stack)\n\n def __iter__(self):\n return iter(self.in_stack)\n\n def __contains__(self, element):\n return element in self.in_stack\n```\n\nThis implementation uses two stacks, an `in_stack` and an `out_stack`. The `in_stack` is used to store elements that are waiting to be processed, and the `out_stack` is used to store elements that have been processed. When an element is enqueued, it is added to the `in_stack`. When an element is dequeued, it is removed from the `out_stack`. If the `out_stack` is empty, the elements in the `in_stack` are popped into the `out_stack` and the `in_stack` is emptied.\n\nThis implementation of a queue data structure using two stacks is efficient, as it uses only two data structures. It is also easy to implement and maintain.", "metadata": {}}
+{"answer_id": "hiSVom5YqSrKZKor8CjHdU", "model_id": "bard:20230327", "question_id": 67, "text": "Sure, here is a program to find the common elements in two arrays without using any extra data structures:\n\n```python\ndef common_elements(array1, array2):\n \"\"\"Finds the common elements in two arrays.\n\n Args:\n array1 (list): The first array.\n array2 (list): The second array.\n\n Returns:\n set: The set of common elements.\n \"\"\"\n # Initialize the set of common elements.\n common_elements = set()\n\n # Iterate through the first array.\n for element in array1:\n # Iterate through the second array.\n for element2 in array2:\n # If the elements are equal, add them to the set of common elements.\n if element == element2:\n common_elements.add(element)\n\n # Return the set of common elements.\n return common_elements\n```\n\nThis program uses a simple algorithm to find the common elements in two arrays. The algorithm first initializes a set of common elements. Then, it iterates through the first array and compares each element to each element in the second array. If an element is found in both arrays, it is added to the set of common elements. The algorithm then returns the set of common elements.\n\nThis algorithm is efficient, as it does not require any extra data structures. However, it can be slow for large arrays, as it has to compare all of the elements in the two arrays.", "metadata": {}}
+{"answer_id": "JRHeURg4TrVAgWNp5yqjFx", "model_id": "bard:20230327", "question_id": 68, "text": "To find the value of f(2), we simply substitute x=2 into the equation:\n\nf(2) = 5(2)^3 - 2(2) + 3 = 40 - 4 + 3 = 39\n\nTherefore, the value of f(2) is 39.", "metadata": {}}
+{"answer_id": "hZwFKeRYBLKihBEWSEGMYJ", "model_id": "bard:20230327", "question_id": 69, "text": "Distribute the 5 on the right-hand side of the equation:\n\n3x + 10 = 5x - 10\n\nCombine like terms:\n\n-2x = -10\n\nDivide both sides by -2:\n\nx = 5\n\nTherefore, the solution to the equation is $x=5$.", "metadata": {}}
+{"answer_id": "iG4SJq9xCKCDDtiqU3szJZ", "model_id": "bard:20230327", "question_id": 70, "text": "The length of a line segment is the distance between its endpoints. The distance formula is:\n\n$$d = \\sqrt{(x_2 - x_1)^2 + (y_2 - y_1)^2}$$\n\nwhere $(x_1, y_1)$ and $(x_2, y_2)$ are the coordinates of the endpoints. In this case, $(x_1, y_1) = (2, -2)$ and $(x_2, y_2) = (10, 4)$. Substituting these values into the distance formula, we get:\n\n$$d = \\sqrt{(10 - 2)^2 + (4 - (-2))^2} = \\sqrt{8^2 + 6^2} = \\sqrt{100} = 10$$\n\nTherefore, the length of the line segment is 10 units.", "metadata": {}}
+{"answer_id": "CjhV8cJ6UEp7Y4f3eWB6E5", "model_id": "bard:20230327", "question_id": 71, "text": "Sure, here is a sample email that you can use as a template for your own joint venture proposal:\n\nDear [Potential Partner Name],\n\nI am writing to you today to propose a joint venture between our two companies. Our two companies have been working together for many years, and we have a proven track record of success. I believe that a joint venture between our two companies would be a mutually beneficial arrangement.\n\nThe purpose of this joint venture would be to develop a new product or service that would be marketed to a specific market. We would each contribute our own unique skills and resources to the project, and we would share the profits from the venture equally.\n\nI believe that this joint venture would be a success for several reasons. First, our two companies have a strong track record of success. We have both been in business for many years, and we have both developed a loyal customer base. Second, our two companies have complementary skills and resources. We each have our own strengths and weaknesses, and a joint venture would allow us to combine our strengths to create something even greater. Finally, I believe that the market for this new product or service is growing rapidly. There is a lot of demand for this type of product or service, and I believe that we can capture a significant share of the market.\n\nI would like to meet with you to discuss this proposal in more detail. I am available to meet at your convenience. Please let me know if you are interested in discussing this further.\n\nThank you for your time and consideration.\n\nSincerely,\n[Your Name]", "metadata": {}}
+{"answer_id": "5waLH9pZsr8jVTpy67QH58", "model_id": "bard:20230327", "question_id": 72, "text": "Dear [Boss's name],\n\nI am writing to inform you of my decision to resign from my position as [Your job title] at [Company name]. My last day of employment will be [Date].\n\nI have accepted a position at another company that will allow me to [Reason for leaving]. I have enjoyed my time at [Company name] and I am grateful for the opportunities I have been given. I have learned a great deal and I am confident that the skills I have developed will be valuable in my new role.\n\nI would like to thank you for your support and guidance during my time at [Company name]. I wish you and the company all the best in the future.\n\nSincerely,\n[Your name]", "metadata": {}}
+{"answer_id": "ZV4sUNujQGS2w7Wg4jbXbf", "model_id": "bard:20230327", "question_id": 73, "text": "Dear Admissions Committee,\n\nI am writing to recommend [student name] for admission to your prestigious graduate program in computer science. I have known [student name] for [number] years, first as their undergraduate advisor at [university name] and now as their research advisor at [university name].\n\n[Student name] is an outstanding student with a strong academic record. They have consistently earned top grades in their coursework, and they have a deep understanding of the theoretical and practical foundations of computer science. [Student name] is also an accomplished researcher. They have worked on several projects that have been published in top academic journals, and they have presented their work at several prestigious conferences.\n\nIn addition to their academic and research accomplishments, [student name] is also a well-rounded individual with a strong work ethic and a commitment to service. They are a member of several student organizations, and they volunteer their time at a local homeless shelter.\n\nI have no doubt that [student name] would be an asset to your graduate program. They are a highly intelligent and motivated individual with a strong work ethic and a commitment to excellence. I highly recommend [student name] for admission to your program.\n\nSincerely,\n[Your name]", "metadata": {}}
+{"answer_id": "X94xm9m7tayWsKFNACqAG4", "model_id": "bard:20230327", "question_id": 74, "text": "Subject: Introducing our new software solution: [Product Name]\n\nBody:\n\nHi [Customer Name],\n\nWe're excited to announce the launch of our new software solution, [Product Name]. [Product Name] is a revolutionary new tool that will help you [solve a problem or improve a process].\n\nWith [Product Name], you can [list of benefits].\n\nWe've been working hard to make [Product Name] the best it can be, and we're confident that you'll love it.\n\nTo learn more about [Product Name] and to sign up for a free trial, please visit our website at [website address].\n\nWe can't wait to hear what you think!\n\nThanks,\n[Your Name]", "metadata": {}}
+{"answer_id": "EFL7fU6gnicQY6DRpHvbTK", "model_id": "bard:20230327", "question_id": 75, "text": "Dear [Customer Name],\n\nI am writing to apologize for the delay in your order. I know that you were expecting your order to arrive on [date], and I am truly sorry that it did not.\n\nThere was an issue with our shipping system that caused your order to be delayed. We have since fixed the issue and your order is now on its way to you. You should receive it within the next few days.\n\nI sincerely apologize for the inconvenience this has caused you. We value your business and we are committed to providing you with the best possible service.\n\nIf you have any questions or concerns, please do not hesitate to contact me.\n\nSincerely,\n[Your Name]", "metadata": {}}
+{"answer_id": "XJLFjCr6vgQZjEXbaMjTa6", "model_id": "bard:20230327", "question_id": 76, "text": "Sure, here is a script for a YouTube video exploring the history and cultural significance of jazz:\n\n* * *\n\n**Intro**\n\nJazz is a music genre that originated in the African-American communities of New Orleans, Louisiana, in the late 19th and early 20th centuries. Since the 1920s Jazz Age, it has been recognized as a major form of musical expression in traditional and popular music.\n\nJazz is characterized by swing and blue notes, complex chords, call and response vocals, polyrhythms and improvisation. Jazz has roots in European harmony and African rhythmic rituals.\n\n**History of Jazz**\n\nJazz has its roots in the blues, which was a form of music that originated in the American South in the late 19th century. The blues was a way for African Americans to express their pain and suffering under slavery and segregation.\n\nJazz also has its roots in ragtime, which was a form of music that originated in the United States in the late 19th century. Ragtime was a fast-paced, syncopated style of music that was often played on the piano.\n\nJazz emerged in New Orleans in the early 20th century. The city was a melting pot of cultures, and jazz was a way for people from different backgrounds to come together and express themselves.\n\nJazz quickly spread to other parts of the United States and the world. In the 1920s, jazz became popular in Europe, and in the 1930s, it became popular in Latin America.\n\n**Cultural Significance of Jazz**\n\nJazz has had a profound impact on American culture. It has been used in film, television, and advertising. It has also been used in political protests and social movements.\n\nJazz has also been a major influence on other genres of music, such as rock and roll, hip hop, and soul music.\n\n**Conclusion**\n\nJazz is a living, breathing art form that has been around for over a century. It has evolved over time, but its core elements remain the same: swing, blue notes, complex chords, call and response vocals, polyrhythms, and improvisation.\n\nJazz is a music that is constantly being reinvented by new musicians. It is a music that is constantly being reinterpreted by new listeners. And it is a music that is constantly being celebrated by new generations of fans.\n\n* * *", "metadata": {}}
+{"answer_id": "bz3rS9MRs9QmaLGg4A8a7B", "model_id": "bard:20230327", "question_id": 77, "text": "**Hawaii: A Cultural Experience of a Lifetime**\n\nHawaii is a state of the United States that is known for its beautiful beaches, lush rainforests, and stunning volcanoes. But what many people don't know is that Hawaii is also home to a rich and vibrant culture.\n\nIf you're planning a trip to Hawaii, be sure to add some cultural experiences to your itinerary. Here are a few suggestions:\n\n* Visit a luau: A luau is a traditional Hawaiian feast that includes food, music, and dancing. It's a great way to experience Hawaiian culture and cuisine.\n* Take a hula class: Hula is a traditional Hawaiian dance that tells stories through movement. It's a beautiful and graceful art form that anyone can learn.\n* Visit a museum: Hawaii has several museums that showcase the state's unique history and culture. These museums are a great place to learn about Hawaiian culture and history.\n* Go on a hike: Hawaii is home to some of the most beautiful hiking trails in the world. These hikes are a great way to experience the natural beauty of the islands.\n* Go snorkeling or scuba diving: The waters around Hawaii are home to a variety of marine life. Snorkeling or scuba diving is a great way to see this marine life up close.\n* Visit a coffee farm: Hawaii is home to some of the best coffee in the world. Visiting a coffee farm is a great way to learn about the coffee-making process and sample some of the local coffee.\n\nThese are just a few of the many cultural experiences that you can have in Hawaii. If you're looking for a truly unique and memorable vacation, be sure to add some cultural experiences to your itinerary.\n\nHere are some must-see attractions in Hawaii:\n\n* Haleakala National Park: Haleakala National Park is home to the dormant Haleakala volcano. The park is a great place to see the sunrise and hike through the rainforest.\n* The North Shore: The North Shore of Oahu is home to some of the most beautiful beaches in the world. The North Shore is also a great place to go surfing.\n* The Big Island of Hawaii: The Big Island of Hawaii is the largest of the Hawaiian Islands. The island is home to volcanoes, rainforests, and beaches.\n* Kauai: Kauai is the fourth largest of the Hawaiian Islands. Kauai is known for its beautiful scenery, including the Na Pali Coast.\n* Maui: Maui is the second largest of the Hawaiian Islands. Maui is known for its beaches, volcanoes, and rainforests.\n\nNo matter what you're interested in, Hawaii has something to offer. Whether you're looking for a relaxing beach vacation or an adventure-filled journey, Hawaii is the perfect destination.", "metadata": {}}
+{"answer_id": "kCS2ZqzKTdDpaXwu3TiFXP", "model_id": "bard:20230327", "question_id": 78, "text": "Denis Villeneuve's Dune is a visually stunning and epic sci-fi film that is sure to please fans of the genre. The film is based on the classic novel of the same name by Frank Herbert, and it tells the story of Paul Atreides, a young man who is destined to become the leader of the Fremen, a desert people who have adapted to life on the harsh planet of Arrakis.\n\nThe film's plot is complex and well-developed, and the characters are well-acted and believable. Timoth\u00e9e Chalamet gives a particularly strong performance as Paul, and he is ably supported by a talented cast that includes Rebecca Ferguson, Oscar Isaac, Josh Brolin, Zendaya, and Jason Momoa.\n\nThe film's special effects are simply stunning. The sandworms of Arrakis are particularly impressive, and the film's desert scenes are incredibly realistic. The film's score by Hans Zimmer is also excellent, and it helps to create a truly immersive experience.\n\nOverall, Dune is a must-see for fans of sci-fi and epic films. It is a visually stunning and well-crafted film that is sure to please audiences.\n\nHere are some specific examples of the film's special effects:\n\n* The sandworms of Arrakis are incredibly realistic. They are massive creatures that can swallow entire buildings, and they are a terrifying sight to behold.\n* The desert scenes of Arrakis are incredibly realistic. The film's director, Denis Villeneuve, went to great lengths to capture the harsh beauty of the desert, and the results are stunning.\n* The film's score by Hans Zimmer is excellent. It helps to create a truly immersive experience and helps to set the tone for the film's epic scope.", "metadata": {}}
+{"answer_id": "7WWWS5UhWcVzkhTypqcFU4", "model_id": "bard:20230327", "question_id": 79, "text": "Sure, here is a possible podcast script for an episode discussing the influence of streaming platforms on the music industry:\n\n**Intro:**\n\nHello and welcome to the podcast! In today's episode, we're going to be discussing the influence of streaming platforms on the music industry. Streaming platforms have become increasingly popular in recent years, and they've had a major impact on how people listen to music. In this episode, we'll discuss the pros and cons of streaming platforms, and how they're changing the music industry.\n\n**Body:**\n\nOne of the biggest benefits of streaming platforms is that they give people access to a wide variety of music. In the past, people were limited to the music that was available in their local stores or on the radio. But with streaming platforms, people can listen to anything they want, whenever they want. This has led to a more diverse and eclectic music scene, as people are exposed to new artists and genres that they might not have heard otherwise.\n\nAnother benefit of streaming platforms is that they're very convenient. You can listen to music on your phone, your computer, or your TV. You don't have to worry about carrying around CDs or MP3 players, and you can easily create playlists of your favorite songs. This convenience has made streaming platforms very popular with people of all ages.\n\nHowever, there are also some drawbacks to streaming platforms. One of the biggest problems is that they don't pay artists very much money. When you listen to a song on a streaming platform, the artist only gets a fraction of a penny. This makes it difficult for artists to make a living from their music, and it's one of the reasons why many artists are unhappy with streaming platforms.\n\nAnother problem with streaming platforms is that they can be addictive. It's easy to spend hours scrolling through your favorite songs, and this can lead to problems like sleep deprivation and anxiety. If you find yourself spending too much time on streaming platforms, it's important to take a step back and find other ways to relax and de-stress.\n\n**Conclusion:**\n\nOverall, streaming platforms have had a major impact on the music industry. They've made it easier for people to listen to music, but they've also made it more difficult for artists to make a living. It's important to be aware of the pros and cons of streaming platforms, so that you can make informed decisions about how you use them.\n\n**Outro:**\n\nThanks for listening to this episode of the podcast! We hope you found it informative and helpful. If you have any questions or comments, please feel free to leave them below. We'll see you next time!", "metadata": {}}
+{"answer_id": "QcSFqbgqLWqn96V6BvWT7B", "model_id": "bard:20230327", "question_id": 80, "text": "The Boston Symphony Orchestra gave a stunning performance of Beethoven's Symphony No. 9 in D minor, Op. 125, \"Choral,\" on Saturday night at Symphony Hall. The orchestra was in top form, and the performance was a thrilling and moving experience for the audience.\n\nThe BSO opened the concert with a lively performance of Beethoven's Symphony No. 5 in C minor, Op. 67. The orchestra played with great energy and precision, and the performance was a perfect way to set the stage for the evening's main event.\n\nAfter intermission, the BSO launched into a powerful and moving performance of Beethoven's Symphony No. 9. The orchestra played with great passion and conviction, and the performance was a truly transcendent experience. The audience was on its feet at the end of the performance, cheering and applauding for minutes on end.\n\nThe BSO's performance of Beethoven's Symphony No. 9 was a truly memorable experience. The orchestra played with great skill and artistry, and the performance was a thrilling and moving experience for the audience. If you have the opportunity to see the BSO in concert, I highly recommend it.\n\nThe overall audience experience was excellent. The hall was packed with people, and the atmosphere was electric. The audience was clearly excited to be there, and they responded enthusiastically to the orchestra's performance. The concert was a great example of the power of music to bring people together and create a shared experience.", "metadata": {}}
diff --git a/model/fastchat/eval/table/answer/answer_gpt35.jsonl b/model/fastchat/eval/table/answer/answer_gpt35.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..915947ff663fae5f7cfdc1967acd39fe176c7518
--- /dev/null
+++ b/model/fastchat/eval/table/answer/answer_gpt35.jsonl
@@ -0,0 +1,80 @@
+{"answer_id": "BZGowHM7L3RvtWRktKZjLT", "model_id": "gpt-3.5-turbo:20230327", "question_id": 1, "text": "Here are some tips to improve your time management skills:\n\n1. Create a schedule: Make a to-do list for the day, week or month and prioritize tasks by importance and deadline.\n\n2. Set realistic goals: Be realistic about what you can accomplish in a given amount of time. Set goals that can be achieved but are also challenging.\n\n3. Minimize distractions: Minimize distractions such as emails, phone calls, and social media notifications. Turn off notifications or use apps that block these distractions.\n\n4. Take breaks: Taking a break can help you recharge and increase productivity. Take a break every hour or so to stretch, go for a walk, or listen to some music.\n\n5. Learn to say 'no': Learn to say 'no' to tasks and requests that are not a priority or are not within your capabilities.\n\n6. Delegate tasks: If possible, delegate tasks to others who are capable of completing them. This will help to reduce your workload.\n\n7. Prioritize self-care: Make sure to take care of yourself by prioritizing self-care activities such as exercise, meditation or spending time with friends and family.\n\nRemember, time management is a skill that takes time and practice to develop.", "metadata": {}}
+{"answer_id": "GsrPLfiNEcRDYAC5aUoY23", "model_id": "gpt-3.5-turbo:20230327", "question_id": 2, "text": "Here are some effective ways to deal with stress:\n\n1. Exercise regularly: Physical activity can help reduce stress and improve mood.\n\n2. Practice relaxation techniques: Deep breathing, meditation, and yoga are some effective ways to relax and reduce stress levels.\n\n3. Get enough sleep: Lack of sleep can increase stress levels. Try to get at least 7-8 hours of sleep each night.\n\n4. Eat a healthy diet: A well-balanced diet can help reduce stress levels and improve overall health.\n\n5. Connect with others: Spending time with friends and family, joining a support group, or participating in social activities can help reduce stress levels and improve mood.\n\n6. Time management: Prioritizing tasks and creating a schedule can help reduce stress and increase productivity.\n\n7. Seek professional help: If stress is impacting your daily life, consider seeking professional help such as therapy or counseling.", "metadata": {}}
+{"answer_id": "5SGfexmPoJTWoVbPMB87KB", "model_id": "gpt-3.5-turbo:20230327", "question_id": 3, "text": "Python and JavaScript are both popular programming languages, but they differ in a number of ways:\n\n1. Programming paradigm: Python is primarily an object-oriented programming (OOP) language, while JavaScript is a multi-paradigm language that supports both functional and OOP styles.\n\n2. Syntax: Python uses indentation (whitespace) to indicate the structure of the code, while JavaScript uses curly braces.\n\n3. Typing: Python is a dynamically typed language, which means that the type of a variable is determined at runtime, while JavaScript is both dynamically and weakly typed, meaning that variable types can change at runtime and type errors may not be caught immediately.\n\n4. Application areas: Python is often used for scientific computing, data analysis, and machine learning, while JavaScript is most commonly used for web development and client-side scripting.\n\nOverall, both Python and JavaScript have their own strengths and weaknesses, and which language to use largely depends on the specific requirements of the project.", "metadata": {}}
+{"answer_id": "RcqQg364kgCkYPshqvmGSp", "model_id": "gpt-3.5-turbo:20230327", "question_id": 4, "text": "Here are some tips that can help you increase your productivity while working from home:\n\n1. Create a dedicated workspace: Pick a place where you can work undisturbed and create an environment that resembles your office.\n\n2. Set a schedule: Establish a specific working schedule and stick to it as much as possible.\n\n3. Take breaks: Take short breaks throughout the day to recharge and refocus your energy.\n\n4. Eliminate distractions: Turn off your phone notifications and avoid any unnecessary distractions.\n\n5. Make a to-do list: Prioritize your tasks in order of importance and create a to-do list.\n\n6. Get dressed: Dressing as you would for work helps put you in the right mindset for productivity.\n\n7. Challenge yourself: Set small and achievable goals and push yourself to complete them.\n\n8. Stay positive: Maintain a positive attitude towards your work, and try to avoid negativity.\n\nWith these tips, you can stay productive and enjoy the benefits of working from home.", "metadata": {}}
+{"answer_id": "3R9jAndzLHQqJwmMi5mNox", "model_id": "gpt-3.5-turbo:20230327", "question_id": 5, "text": "Certainly! Quantum computing is a type of computing that uses quantum-mechanical phenomena to process information. \n\nIn classical computing, a bit can exist in one of two states - 0 or 1. In quantum computing, a qubit can exist in a superposition of states, which means it can exist in a combination of 0 and 1 at the same time. This ability to exist in multiple states simultaneously allows for the processing of large amounts of information simultaneously and can significantly speed up certain types of computations.\n\nQuantum computers use a series of quantum gates - which are similar to the logic gates used in classical computers - to manipulate the state of the qubits, allowing for the computations to be completed. There are several different technologies used to build quantum computers, including superconducting quantum processors, trapped-ion quantum processors, and photonic quantum processors.\n\nOne of the most important applications of quantum computing is in solving problems that are intractable for classical computers. For example, quantum computers can be used to factor large numbers, which is the basis for many encryption protocols - leading to exciting possibilities in code-breaking, cryptography, and cybersecurity in the future.", "metadata": {}}
+{"answer_id": "Uw8SaYLPMGR5sdV9FDx8pb", "model_id": "gpt-3.5-turbo:20230327", "question_id": 6, "text": "Plant-based protein sources are derived from plants, including legumes, nuts, seeds, and whole grains. They tend to be lower in saturated fat, higher in fiber and several micronutrients, and have less of an impact on the environment when compared to animal-based protein sources.\n\nAnimal-based protein sources are derived from animals, including meat, dairy, and eggs. They tend to be higher in saturated fat and cholesterol, which can increase the risk of heart disease and other health problems when consumed in excess. They are also associated with a higher environmental impact.\n\nHowever, animal-based protein sources tend to be more readily absorbed and complete (containing all essential amino acids), making them a good choice for those with higher protein needs, such as athletes or individuals who have difficulty meeting their protein requirements through plant-based sources alone.", "metadata": {}}
+{"answer_id": "53gmokt2KBgKu6NMPopxcu", "model_id": "gpt-3.5-turbo:20230327", "question_id": 7, "text": "Sure, here are a few practical tips to develop critical thinking skills:\n\n1. Ask questions: Don't simply accept information at face value. Ask questions that allow you to evaluate the reason, perspective and logic behind the information.\n\n2. Analyze: Take the time to analyze and evaluate the information that you receive. This will help you to identify assumptions, biases, strengths and weaknesses of an argument.\n\n3. Consider different perspectives: When analyzing information, try to consider different perspectives to gain a wider understanding of any topic.\n\n4. Research and read extensively: Read a wide range of sources to get a better understanding of different viewpoints and opinions.\n\n5. Practice logic and reasoning: Practice thinking through problems logically by breaking them down into smaller parts.\n\n6. Establish a point of view: Consider developing an opinion or thesis statement, and then collect evidence to support that position.\n\n7. Reflect on your own thinking: Consider how you arrived at a conclusion, and whether you have any biases or assumptions that may have influenced your thinking.\n\nWith time, and practicing these skills consistently, you can become a better critical thinker.", "metadata": {}}
+{"answer_id": "bKrbrGsN7zjKLvBk2h3tvo", "model_id": "gpt-3.5-turbo:20230327", "question_id": 8, "text": "There are several major challenges faced by the education sector today, including:\n\n1. Access to Education: Many children, particularly those in developing countries, do not have access to quality education.\n\n2. Funding: Education is expensive, and many governments struggle to find the necessary funding to provide quality education to all students.\n\n3. Teacher Shortages: Many countries face a shortage of qualified teachers, which can negatively impact the quality of education.\n\n4. Technological Integration: Many schools struggle to effectively integrate technology into their classrooms, which can limit student learning and engagement.\n\n5. Student Engagement and Motivation: Getting students to engage with their learning and stay motivated can be a significant challenge for educators.\n\n6. Student Diversity: Students come from diverse backgrounds and have different learning styles, making it difficult for educators to provide a one-size-fits-all approach to teaching.\n\n7. Assessment and Evaluation: Measuring student learning and progress can be a challenge, particularly in non-standardized environments where students are assessed based on different criteria.", "metadata": {}}
+{"answer_id": "HEGL3aPUnNrdNtNt3XLDKi", "model_id": "gpt-3.5-turbo:20230327", "question_id": 9, "text": "There are several primary factors that influence consumer behavior, including:\n\n1. Personal factors - These include individual characteristics such as age, gender, income, education, personality, and lifestyle.\n\n2. Psychological factors - These include the consumer's perception of the product or service, motives, attitudes, beliefs, and values.\n\n3. Social factors - These include the consumer's family, friends, social class, culture, and reference groups.\n\n4. Situational factors - These include the physical environment, time, and mood of the consumer.\n\n5. Marketing mix factors - These include product, price, promotion, and distribution, and how they affect the consumer's decision-making process.", "metadata": {}}
+{"answer_id": "W9zpMVa2cJUJW8B2uGMCJy", "model_id": "gpt-3.5-turbo:20230327", "question_id": 10, "text": "Here are some effective strategies for conflict resolution in the workplace:\n\n1. Listen actively: Allow both parties to express their perspective without interrupting or judging them.\n\n2. Find common ground: Look for areas of agreement and shared goals between the parties.\n\n3. Identify the root cause: Determine the underlying issue that led to the conflict.\n\n4. Brainstorm solutions: Collaborate with both parties to come up with a solution that addresses their needs and interests.\n\n5. Communicate clearly: Make sure that everyone involved understands the resolution and the steps needed to move forward.\n\n6. Follow up: Check in with both parties to determine if the resolution worked and if any adjustments need to be made.\n\n7. Seek outside help if necessary: If the conflict is too complex, it may be necessary to involve a mediator or other neutral third party to facilitate the resolution process.", "metadata": {}}
+{"answer_id": "LacdmoweqKYGzt3aMBYjEa", "model_id": "gpt-3.5-turbo:20230327", "question_id": 11, "text": "Using a single-use plastic bottle has negative implications on both the environment and human health. Here are some of the potential consequences:\n\n1. Environmental Implications: Single-use plastic bottles contribute to plastic pollution in the environment. These bottles take thousands of years to decompose, and they pose a severe threat to wildlife, especially marine life. They often end up in oceans, rivers, and other water bodies, where they harm marine animals by entangling them or being ingested. It also contributes to the depletion of natural resources, as these bottles are typically made from non-renewable fossil fuels.\n\n2. Human Health Implications: There are potential health implications for both consumers and workers involved in the production and disposal of single-use plastic bottles. The production of these bottles releases toxic chemicals that can harm the environment and the workers involved. Ingesting plastic particles can also affect human health, resulting in hormonal imbalances and other negative health impacts.\n\nIn contrast, using a reusable bottle has several positive implications:\n\n1. Environmental Benefits: Reusable bottles can reduce plastic waste and help conserve natural resources. They reduce the number of disposable bottles that end up in landfills and oceans, helping reduce plastic pollution.\n\n2. Health Benefits: Using a reusable bottle helps to reduce the exposure to harmful chemicals associated with plastic production, as well as reducing the potential health impact of ingesting plastic particles.\n\nOverall, using a reusable bottle is better for the environment, wildlife, and human health. It is a more sustainable and responsible choice that supports efforts to conserve natural resources and reduce plastic waste.", "metadata": {}}
+{"answer_id": "JqVreebbPuNdjw8E8K4Ssf", "model_id": "gpt-3.5-turbo:20230327", "question_id": 12, "text": "There are several factors that should be considered when designing an inclusive and accessible public transportation system, including:\n\n1. Physical accessibility: ensuring that transportation infrastructure, such as bus stops or train stations, are physically accessible to people with disabilities or mobility impairments, with features such as ramps, elevators, and tactile paving.\n\n2. Signage and wayfinding: providing clear, visible and easy-to-read signs, maps, and audio announcements that help passengers navigate the system and find their way around.\n\n3. Sensory inclusivity: designing the system with the needs of passengers who have sensory sensitivities or disabilities in mind, such as minimizing noise levels from trains or buses and providing calming spaces.\n\n4. Universal Design: adopting universal design principles and practices, making sure that the system is designed to be user-friendly for all, regardless of ability or disability.\n\n5. Employee training and awareness: ensuring that staff, including drivers, are properly trained and have the necessary knowledge to assist passengers with disabilities or other accessibility needs.\n\nOther factors to consider may include affordability, availability, reliability, and safety in order to provide a comprehensive and inclusive transportation system for all.", "metadata": {}}
+{"answer_id": "hEMThhsN85Ud5X8xBv9BZJ", "model_id": "gpt-3.5-turbo:20230327", "question_id": 13, "text": "Governments typically use a combination of fiscal and monetary policies to combat economic recessions.\n\n1. Fiscal policies: \n\nFiscal policies refer to the government's decisions related to taxes and government spending. In a recession, governments can use fiscal policies to boost economic activity by:\n\n- Increasing government spending: Governments can increase spending on infrastructure projects, social welfare programs, and other public projects to create jobs and stimulate economic activity.\n\n- Cutting taxes: Reducing taxes, particularly for low- and middle-income households, can boost consumer spending and encourage businesses to invest and create new jobs.\n\n- Providing subsidies: Governments can provide subsidies to businesses and industries in order to encourage investment and growth. \n\n2. Monetary policies: \n\nMonetary policies refer to the management of the money supply and interest rates. In a recession, governments can use monetary policies to stimulate economic activity by:\n\n- Lowering interest rates: Lower interest rates can make borrowing cheaper and businesses may be more likely to invest in new projects and hire workers.\n\n- Quantitative easing: Central banks can engage in quantitative easing by purchasing government bonds or other assets, which can increase the money supply and stimulate economic activity.\n\nOverall, the goal of both fiscal and monetary policies is to inject demand into the economy and get people and businesses spending and investing again in order to help lift the economy out of recession.", "metadata": {}}
+{"answer_id": "BvFV7sx53PAK5bNn89urFs", "model_id": "gpt-3.5-turbo:20230327", "question_id": 14, "text": "Language and cultural barriers can have a significant impact on communication and relationships in multicultural societies. \n\nIn terms of language barriers, individuals who do not share a common language may find it difficult to communicate effectively. This can lead to misunderstandings, miscommunications, and frustrations in trying to convey their thoughts and feelings. Language barriers can also make it challenging to establish trust and build strong relationships, as people may feel hesitant to share personal information or express their opinions if they are unable to do so fluently in a shared language.\n\nCultural barriers can also play a role in shaping communication and relationships. People from different cultural backgrounds may have different beliefs, values, and norms around topics such as social interaction, personal space, and nonverbal communication, among others. These cultural differences can create confusion or misunderstandings that can impact communication and relationships.\n\nHowever, it's important to note that language and cultural barriers do not need to be insurmountable obstacles. Through efforts such as language classes, cultural exchange programs, and sensitivity training, individuals can learn to navigate these differences and build stronger relationships across cultures.", "metadata": {}}
+{"answer_id": "dM5GHbLuPNfzUbBnJz6w7K", "model_id": "gpt-3.5-turbo:20230327", "question_id": 15, "text": "Sure, here is one possible scenario:\n\nImagine a hospital that uses AI-powered chatbots to assist patients with their medical inquiries quickly and accurately. When patients arrive at the hospital for appointments, they could interact with the chatbot on their smartphones or on a tablet provided in the waiting area.\n\nThe chatbot could ask patients a series of questions to understand their symptoms or medical condition. It could then provide basic medical advice, schedule a doctor's appointment, order lab tests or prescription refills, or refer patients to specialists as required.\n\nBy using AI, the hospital could improve the quality and efficiency of healthcare delivery in several ways. The chatbot could triage patients based on their symptoms and urgency, reducing the burden on doctors and nurses to answer basic inquiries. This would free up medical staff to focus on more complex cases, leading to improved patient outcomes and satisfaction.\n\nMoreover, by automating routine tasks such as scheduling appointments, ordering lab tests or refilling prescriptions, hospitals could improve their operational efficiency and reduce costs. This would lead to better resource allocation, less waiting time for patients, and ultimately a more satisfied patient population.\n\nOverall, by leveraging AI for patient triage and routine tasks, care providers could offer more targeted and personalized care, while reducing the burden on medical staff and improving the efficiency of healthcare delivery.", "metadata": {}}
+{"answer_id": "BX7maaP5kGY6bBTLJRwkit", "model_id": "gpt-3.5-turbo:20230327", "question_id": 16, "text": "Gene editing is a process of changing the DNA of an organism in order to alter its traits. CRISPR-Cas9 is a gene editing technology that has revolutionized the field of genetic engineering. In this technique, the Cas9 enzyme is guided by a molecule called guide RNA (gRNA), which is programmed to recognize and cut specific sequences of DNA. This cut then triggers the cell\u2019s natural DNA repair mechanism, leading to the addition, deletion, or replacement of specific DNA sequences.\n\nCRISPR-Cas9 has a wide range of potential applications, including the ability to cure genetic diseases, develop new treatments for cancer, and create new crop varieties with improved yield and resistance to pests and disease. It can also be used to enhance human traits, such as intelligence or physical performance, which raises ethical concerns about the potential for eugenics.\n\nOne of the main ethical concerns associated with CRISPR-Cas9 is the risk of unintended consequences. Gene editing can have unforeseeable impacts on the environment and future generations, which could lead to potentially harmful and irreversible effects. There are also concerns about the potential misuse of gene editing technology for unethical purposes, such as creating designer babies or enhancing traits for non-therapeutic purposes.\n\nAnother ethical consideration is the issue of access and equity. CRISPR-Cas9 is an expensive technology, and its use could widen the gap between the haves and the have-nots, with potentially devastating consequences for global health and social justice.\n\nIn conclusion, while the potential benefits of CRISPR-Cas9 are immense, its ethical implications are complex and multifaceted. Careful consideration and regulation of its use will be required to maximize its benefits, while minimizing its potential risks and harms.", "metadata": {}}
+{"answer_id": "STuX8oc7Gu3SN6EWzwpUpp", "model_id": "gpt-3.5-turbo:20230327", "question_id": 17, "text": "Vaccinations work by introducing a weakened or deactivated form of a virus or bacteria into the body, which triggers the immune system to create a defense against it. This defense, in the form of antibodies, allows the immune system to recognize and destroy the actual virus or bacteria if a person is exposed to it later on. By receiving the vaccine, individuals become immune to the disease without having to experience its symptoms.\n\nWhen a large percentage of a population is vaccinated against a particular disease, it becomes difficult for the disease to spread because there are fewer people who can catch and transmit it. This concept is called herd immunity. Herd immunity protects individuals who cannot receive vaccines, such as those with weakened immune systems, and those for whom vaccines are not effective. It also helps prevent the spread of infectious diseases in the community at large, making outbreaks less likely to occur.", "metadata": {}}
+{"answer_id": "TFUUXWS7yn2u2b4n7eM3ZB", "model_id": "gpt-3.5-turbo:20230327", "question_id": 18, "text": "Social media platforms play a significant role in the way people consume and share news. They have revolutionized the distribution of news by providing instant access to real-time happenings around the world. Social media platforms promote the democratization of information, providing a platform where everyone can share their opinions, thoughts, and ideas.\n\nHowever, the downside to social media platforms is the implications they create for the spread of misinformation. Since everyone can share information, it is often challenging to verify the accuracy and relevance of the information presented online. It's also difficult for readers to identify credible sources of information and, thus, the authenticity of the news.\n\nThe spread of misinformation has grave implications for society, as it can significantly impact public opinion, harm reputations, and even incite violence. As a result, social media platforms have a responsibility to control the spread of false information and misinformation.\n\nConclusion\n\nIn conclusion, social media platforms play a critical role in the distribution of news. While the democratization of information is a fantastic feat, the spread of misinformation is an on-going challenge, inhibiting the accurate distribution of news. Social media platforms must take accountability and act to address the issue of misinformation by creating control mechanisms to contain the problem.", "metadata": {}}
+{"answer_id": "3yRq2XXPi83H7Rr5SZS9rE", "model_id": "gpt-3.5-turbo:20230327", "question_id": 19, "text": "Cultural, social, and economic factors are among the most important factors that influence people's food choices. Cultural factors refer to the customs, beliefs, and traditions that people have when it comes to the foods they eat. Social factors include peer pressure, advertising, and media. Economic factors include cost, availability, and accessibility.\n\nFor example, some cultures emphasize the consumption of large amounts of meat and animal products as a sign of wealth and prosperity. In such circumstances, it can be challenging to promote healthier diets that focus on plant-based sources of protein.\n\nSimilarly, social factors like advertising can influence people's food choices by promoting unhealthy food options that are high in sugar, salt, and fat. These foods tend to be cheap and readily available, making them an attractive option for people who are cost-conscious.\n\nFinally, economic factors affect food choices by limiting access to healthy foods in certain areas or pricing them out of reach for many people.\n\nTo promote healthier diets, it is crucial to understand these cultural, social, and economic factors and address them through education, policy changes, and community-based interventions. For example, raising awareness of the benefits of a plant-based diet, imposing taxes on unhealthy foods, and increasing funding for community gardens and farmers' markets can help encourage healthier food choices. Additionally, working with local businesses and restaurants to provide healthier, affordable meal options can also have a significant impact on improving population health.", "metadata": {}}
+{"answer_id": "Sw34dAwQPCfGGotwRwhvtv", "model_id": "gpt-3.5-turbo:20230327", "question_id": 20, "text": "Sure, here\u2019s an explanation:\n\nNatural selection is the process through which certain traits or characteristics become more common in a population over time, because individuals with those traits are more likely to survive and reproduce. This process contributes to the evolution and adaptation of species.\n\nHere\u2019s how it works: Within a population, there is genetic variation \u2013 some individuals will have different traits, such as size, color, or behavior. In environments with limited resources or where there are predators, individuals with certain advantageous traits are better suited to survive, find resources, and avoid danger. These individuals are more likely to reproduce and pass on their advantageous traits to their offspring. Over time, this process results in the spread and eventual dominance of those advantageous traits in the population. This is the essence of natural selection.\n\nAs a result, natural selection is an important driver of evolution and adaptation of species to their environments. Through natural selection, advantageous traits become more common and disadvantageous ones become less common, leading to the emergence of new species or the modification of existing ones.", "metadata": {}}
+{"answer_id": "cZw4Jw8Zyz6ZUy4WDsC6ta", "model_id": "gpt-3.5-turbo:20230327", "question_id": 21, "text": "Greetings, noble lords and ladies. I am Sir [Name], a humble knight in service to this great kingdom. It is an honor to be in the presence of such esteemed company, and I am grateful for the opportunity to share in this feast with all of you. I have served my lord with valor and courage, and I pledge to continue to defend and protect this land with my life and sword. May our cups overflow and our merriment be endless on this joyous occasion.", "metadata": {}}
+{"answer_id": "nj9y2HTWFGsD5B278ozm73", "model_id": "gpt-3.5-turbo:20230327", "question_id": 22, "text": "Ahoy, my fellow pirates! As we set sail in search of treasure, let us remember the thrill of adventure and the rewards that await us. There are riches beyond our wildest dreams waiting to be discovered, and we are the brave souls who will claim them. Each of us has a role to play in this mission, whether it be navigating, scouting, or fighting. Together, we will overcome any obstacle in our path and emerge victorious, with treasure beyond measure. So, let us hoist the sails and set course for glory!", "metadata": {}}
+{"answer_id": "Ep9rLcNzEGhG7AgbLTpRtm", "model_id": "gpt-3.5-turbo:20230327", "question_id": 23, "text": "Oh, my dearest love, my heart doth beat\n\nWith every step thou tak'st, my soul doth meet\n\nA sudden warmth, a new and fiery thrust\n\nThat fills my being with an ardent lust.\n\nThou art the sun that lights my every day\n\nThe moon that guides me on my nightly way\n\nThou art the very air that I do breathe\n\nThou art the one for whom my heart doth cleave.\n\nSo hear me now, ye gods above\n\nAnd witness to this heartening love\n\nThat I do feel, with all my heart\n\nFor her, my love, the fairest art.", "metadata": {}}
+{"answer_id": "oNULT72cYUvit7D9SHb5aM", "model_id": "gpt-3.5-turbo:20230327", "question_id": 24, "text": "Well, little one, my origin story begins with an ordinary person like you or me. One day, something extraordinary happened that changed my life forever. For me, it was an accident that infused me with amazing powers.\n\nAt first, I wasn't sure what to do with my new abilities, but I knew I wanted to use them to help people. So, I put on a costume and became a superhero. Every day, I use my powers to fight crime, protect the innocent, and make the world a better place.\n\nBut the most important thing to remember is that anyone can be a hero, even if they don't have powers. All you need is the courage to do what's right and help those in need.", "metadata": {}}
+{"answer_id": "TX86xjPKTk2UxWwV4e8zRK", "model_id": "gpt-3.5-turbo:20230327", "question_id": 25, "text": "As a time traveler from the year 3000, I would tell people about the following technological advancements:\n\n1. Advanced Artificial Intelligence: In the future, AI is so advanced that it can completely automate many jobs that humans currently do. This has resulted in increased productivity and efficiency across many industries.\n\n2. Quantum Computing: Quantum computers are used extensively in the future and have led to huge advancements in fields like physics, chemistry, and cryptography.\n\n3. Space Travel: With new propulsion systems and faster-than-light travel, space travel is much faster and more accessible to the general public. We have even established a permanent settlement on Mars, and plans are underway for further exploration and colonization of the galaxy.\n\n4. Renewable Energy: Energy production has shifted primarily to renewable sources like solar and wind, with new breakthroughs in energy storage and distribution making it easier to power societies without relying on fossil fuels.\n\n5. Medical Advancements: With gene editing and advanced prosthetics, people are able to live much longer and healthier lives. Diseases like cancer and Alzheimer's have been largely eradicated, and there are even experimental treatments that can reverse the aging process.\n\nThese technological advancements have transformed society in countless ways, and I believe they will continue to shape the course of human history for years to come.", "metadata": {}}
+{"answer_id": "e5YFb6PojDThkcZdSH8kpC", "model_id": "gpt-3.5-turbo:20230327", "question_id": 26, "text": "It's the final moments of the championship game and the score is tied. The tension in the arena is palpable as both teams desperately try to secure the victory.\n\nWith just seconds left on the clock, the ball is in the hands of the star player. He looks up and sees an opening in the defense.\n\nHe makes a quick cut to the right and drives towards the basket. The defense tries to keep up, but he's too quick. \n\nAs he jumps into the air, he releases the ball with perfect form. It floats through the air, seemingly in slow motion, before finding its way into the basket.\n\nThe crowd erupts in cheers as the buzzer sounds, signaling the end of the game. The team rushes onto the court, hugging and high-fiving each other in celebration of their victory.\n\nThe winning shot will go down in history as one of the greatest moments in sports, and the championship trophy will be proudly displayed in their locker room for years to come.", "metadata": {}}
+{"answer_id": "NnkbyoNFRHay4toKTFfaeE", "model_id": "gpt-3.5-turbo:20230327", "question_id": 27, "text": "Welcome, esteemed judges. I'm excited to present my signature dish, which has been inspired by my travels and memories from my childhood. This dish is a fusion of flavors and ingredients that create a unique and unforgettable experience for your taste buds.\n\nMy dish starts with a perfectly seared and succulent piece of protein - tender Wagyu beef, which has been marinated with a special blend of spices, and cooked to medium-rare perfection. The beef is then served on a bed of exotic mushrooms, braised in a rich red wine sauce, with hints of tangy tomato and sweet caramelized onions.\n\nBut that's not all - to balance out the richness of the beef and the savory flavors of the sauce, I've added a bright and zesty salad, which features Japanese seaweed, picked sesame seeds, and a delicate yuzu dressing. And finally, to bring it all together, a dash of my secret spice blend, which ties all the flavors together.\n\nThis dish is like a journey that takes you from the earthy flavors of the beef and mushrooms to the vibrant freshness of the salad, and leaves you with a satisfying warmth and lingering sweetness from the savory wine sauce. Judges, I hope you enjoy my signature dish as much as I enjoyed creating it!", "metadata": {}}
+{"answer_id": "Gpb8MuNU3Pt7k93dpRN9WM", "model_id": "gpt-3.5-turbo:20230327", "question_id": 28, "text": "I'm sorry, but as an AI language model, I am not capable of reaching the summit of a mountain, experiencing emotions or seeing the view from the top. However, I can provide details about the summit of Mount Everest based on information available to me. \n\nStanding at the summit of Mount Everest, the highest peak in the world at an elevation of 29,029 feet, would be an incredible feat for any climber. The ascent to the top of this majestic mountain is challenging, dangerous and requires physical and mental preparation. The weather on the summit is unpredictable, and the oxygen is thin, making it difficult to breathe. But with that being said, the view from the summit is said to be breathtaking, with a panoramic view of the Himalayas and the surrounding landscape that stretches out for miles below. The sense of accomplishment and pride in achieving such a difficult climb would undoubtedly be an emotional experience for any mountain climber.", "metadata": {}}
+{"answer_id": "SYvkCCHBUZPd9DQuidZM8K", "model_id": "gpt-3.5-turbo:20230327", "question_id": 29, "text": "As a space colonist on Mars, my daily life would be vastly different from life on Earth. Some of the main challenges I would face living on another planet are:\n\n1. Limited Resources: One of the primary challenges of living on Mars would be the scarcity of resources such as food, water, and oxygen. As a Mars colonist, I would have to rely on recycling and sustainable practices to make the most of the resources available.\n\n2. Communication Delays: As Mars is quite far from Earth, communication delays between the two planets would be significant. This would make real-time communication difficult, and we would have to rely on automated systems or pre-planned communication schedules.\n\n3. Extreme Environment: The Martian environment is harsh and hostile, with high levels of radiation, extreme temperature fluctuations, and violent dust storms. Living in such an environment would require advanced protective gear and equipment, as well as being always prepared to handle emergency situations.\n\n4. Monotony: Mars would not have the diverse range of plant and animal life we have on Earth. The monotony of the daily routine, the lack of variety in the environment, and the isolation could lead to increased stress levels and mental health issues.\n\nDespite the many challenges, living on Mars could also be exhilarating and full of opportunities to explore and study a new world. My daily life would include conducting scientific experiments, studying the Martian landscape, maintaining the habitats and equipment, and possibly even going on spacewalks or exploring the planet's surface. We would have to work together as a close-knit team to ensure our survival, and our work would contribute to a better understanding of our universe and our place in it.", "metadata": {}}
+{"answer_id": "NjdsG8tYfrHMT5zGZPavk6", "model_id": "gpt-3.5-turbo:20230327", "question_id": 30, "text": "I am a survivor in a post-apocalyptic world, where resources are limited and danger lurks around every corner. To survive, I rely on my cunning, resourcefulness, and connections.\n\nMy first ally was a fellow survivor named Jake. We met fortuitously when we both stumbled upon the same abandoned warehouse, both seeking shelter for the night. After a long and tense standoff, we decided to share the space and split the resources we had scrounged up. We quickly became friends, relying on each other for survival and companionship.\n\nAs we made our way through the wasteland, we met other survivors, some friendly and some not. I learned that trust was a luxury I couldn't afford, and that my instincts would have to guide me in all my interactions.\n\nOne of our most valuable allies was a woman named Eva, who had managed to survive on her own for years. She taught us how to scavenge for food, how to make weapons out of scraps, and how to avoid danger on the road.\n\nThough our group often faced challenges and setbacks, we persisted, relying on each other for survival in a world where simple existence was a triumph. And as we journeyed together, we learned that the bonds forged in such extreme circumstances were unbreakable, and that together we had a fighting chance.", "metadata": {}}
+{"answer_id": "8eovAhyvrKJEMWiVdYzByH", "model_id": "gpt-3.5-turbo:20230327", "question_id": 31, "text": "There are a few ways to determine if a restaurant is popular among locals or mainly attracts tourists:\n\n1. Look for reviews online from local residents. If a restaurant has a high number of reviews from locals and they are generally positive, it's a good indication that it's popular among residents.\n\n2. Observe the clientele while visiting the restaurant. If you notice a lot of tourists, especially those who are carrying guidebooks and taking photos of the food, it's likely that the restaurant is more of a tourist destination.\n\n3. Talk to the staff if possible. They may be able to give you some insight into the restaurant's clientele and whether they tend to be more locals or tourists.\n\nKnowing whether a restaurant is popular among locals or mainly attracts tourists can be useful for a few reasons. If you are a tourist, you may be looking for a more authentic local experience and want to avoid restaurants that are primarily geared towards tourists. On the other hand, if you are a local resident, you may be interested in trying out new restaurants that are popular among your peers. Additionally, if you are a business owner looking to open a restaurant, this information can be helpful in determining the restaurant's target market and marketing strategy.", "metadata": {}}
+{"answer_id": "nvyaGEveLWBaxgXzriB93d", "model_id": "gpt-3.5-turbo:20230327", "question_id": 32, "text": "Here are some subtle clues you can look for to identify when someone is pretending to understand a topic or conversation:\n\n1. They use vague language or buzzwords to mask their lack of knowledge.\n2. They repeat what others have said without adding anything new or asking any questions.\n3. They nod their head or make other nonverbal cues to suggest they are following along, but don\u2019t actively participate.\n4. They avoid eye contact or become defensive when asked to expand on their thoughts or clarify their understanding.\n5. They don\u2019t ask any questions or seek clarification when they don\u2019t understand something.\n6. They use filler phrases such as \u201cyeah\u201d or \u201cokay\u201d in response to questions or statements without contributing anything of value.\n7. They change the subject abruptly or avoid engaging in the conversation altogether.", "metadata": {}}
+{"answer_id": "3xU2t6Yvx9EWpqfqvinNfH", "model_id": "gpt-3.5-turbo:20230327", "question_id": 33, "text": "There are a few reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app. These reasons include:\n\n1. GPS devices and smartphone apps require a source of power, and therefore, can run out of battery. Paper maps, on the other hand, do not require any power and can be used at any time.\n\n2. Sometimes, GPS devices or smartphone apps can provide incorrect information or lose signal. In these cases, paper maps or directions from locals might be more reliable.\n\n3. Reading a paper map can help you get a better sense of the geography and the layout of the area you are traveling through. This can help you identify landmarks or other important features that may not be visible on a digital map.\n\n4. Some people prefer the tactile experience of using a paper map and finding their way through an area using their own navigation skills.\n\n5. Finally, trusting your own sense of direction and intuition can be a rewarding experience, and can help you build a better understanding of the area you are traveling through.", "metadata": {}}
+{"answer_id": "Mq6hzNziUxzQ2juPMDrv3h", "model_id": "gpt-3.5-turbo:20230327", "question_id": 34, "text": "There are several ways to determine if a person is genuinely interested in a conversation or simply being polite:\n\n1. Body language - If the person is genuinely interested in the conversation, they will usually lean in slightly, make eye contact, and use open and relaxed body language.\n\n2. Active listening - If the person is actively listening to what you're saying, asking questions and responding with their own thoughts, then they are probably genuinely interested in the conversation.\n\n3. Follow-up - If the person follows up with you after the conversation or remembers details from your conversation in future interactions, then they were likely genuinely interested.\n\n4. Timing - If the person is in a rush to end the conversation or seems distracted, they may be polite and trying to end the conversation quickly.\n\nOverall, it's important to pay attention to both verbal and nonverbal cues to determine if someone is genuinely interested in a conversation or simply being polite.", "metadata": {}}
+{"answer_id": "KU6BNNN8d6MLHyrA8nV4DB", "model_id": "gpt-3.5-turbo:20230327", "question_id": 35, "text": "There are several reasons why someone may prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher:\n\n1. Personalized service: Local businesses often offer personalized service because they know their customers by name and can offer individualized recommendations and support.\n\n2. Supporting the local economy: Shopping at locally-owned businesses helps to keep money in the local economy and supports job creation in the community.\n\n3. Unique products: Independent businesses often offer unique, one-of-a-kind products that cannot be found at larger chain stores.\n\n4. Environmental impact: Small businesses tend to have a lower carbon footprint as they source their products and materials locally, reducing transportation and shipping emissions.\n\n5. Community investment: Local businesses are often more invested in their community and are more likely to donate to local charities and civic organizations, further supporting the local community.", "metadata": {}}
+{"answer_id": "RpHbPLJamuknRRa3xU5bUF", "model_id": "gpt-3.5-turbo:20230327", "question_id": 36, "text": "As an AI language model, I can suggest that there are several factors to consider when assessing the credibility of a source of information:\n\n1. Check the date: Make sure the information is up-to-date and relevant to the topic you're researching.\n\n2. Identify the author: Look for information about the author\u2019s credentials, expertise, and affiliation. Conduct a quick search on the author to see if they have any biases, conflicts of interest, or a history of producing misleading or inaccurate content.\n\n3. Evaluate the source: Determine where the information is coming from. Check the domain name or URL extension to see if it\u2019s affiliated with a reputable organization or institution. Avoid sites with questionable or overly sensational headlines.\n\n4. Verify the information: Verify the information by fact-checking it using reputable sources like academic journals, government websites, or reliable news sources.\n\n5. Evaluate the tone: Evaluate the tone of the article or post. Is it objective, balanced, and free of inflammatory language? Be wary of articles that rely heavily on emotional language or use sensationalized claims to try to persuade you to take action.\n\nTaking a critical approach and triangulating information from multiple sources can help ensure that you have a reliable and accurate understanding of the subject matter.", "metadata": {}}
+{"answer_id": "AFR3AJW4sSPLDLiAUvrL8s", "model_id": "gpt-3.5-turbo:20230327", "question_id": 37, "text": "There is no one definitive answer to this question, as individual preferences for fear-inducing experiences are rooted in a variety of factors, including personality traits, past experiences, and cultural backgrounds. Some people may enjoy the thrill of being scared because it provides an adrenaline rush and a feeling of excitement, while others may find it cathartic or empowering to confront their fears in a safe environment. On the other hand, some people may avoid scary experiences because they find them stressful or anxiety-provoking, or because they have had negative experiences with fear in the past. Ultimately, each person's response to fear-inducing stimuli is unique, and depends on a variety of psychological and physiological factors.", "metadata": {}}
+{"answer_id": "esqiBYHa56ygcPU2ux2Pdx", "model_id": "gpt-3.5-turbo:20230327", "question_id": 38, "text": "Observing the behavior of other people in a social situation can provide clues about cultural norms and expectations in various ways. For example:\n\n1. Dress codes: Observing how people dress in a particular social situation can provide insight into what is considered appropriate or expected attire. For instance, if people are dressed conservatively, it may indicate that this is the norm for that culture.\n\n2. Social hierarchy: Observing how people interact with each other can provide clues about the social hierarchy or power dynamics within a culture. For instance, if people defer to older individuals or those in positions of authority, it may indicate that respect for elders and hierarchy are important cultural values.\n\n3. Communication styles: Observing how people communicate with each other can provide clues about their communication styles and what is considered appropriate behavior in that culture. For instance, if people speak softly and avoid eye contact, it may indicate that the culture values modesty and avoiding confrontation.\n\n4. Customs and traditions: Observing how people celebrate holidays or special occasions can provide insight into cultural customs and traditions. For instance, if people exchange gifts or participate in specific rituals, it may indicate that these are important cultural practices.\n\nOverall, by paying attention to social cues and observing the behavior of other people in cultural contexts, one can gain insight into the norms and expectations of that culture.", "metadata": {}}
+{"answer_id": "NmuuKUipqt62QKuEHCuBWh", "model_id": "gpt-3.5-turbo:20230327", "question_id": 39, "text": "This is a complex question that requires a thoughtful and nuanced answer. There are arguments to be made on both sides, and ultimately, it comes down to personal values and priorities.\n\nOn one hand, exploring space is a noble and inspiring goal that has the potential to expand our understanding of the universe and our place in it. It could also bring about new advancements in technology and inspire young people to pursue careers in science and engineering.\n\nOn the other hand, there are many urgent problems facing our planet that require immediate attention and resources, including climate change, poverty, inequality, and access to healthcare and education. Some argue that we should prioritize solving these issues before committing resources to space exploration.\n\nUltimately, it's up to individuals, governments, and societies to decide how to balance these competing priorities. It is possible to do both - to invest in space exploration while also addressing pressing problems on Earth.", "metadata": {}}
+{"answer_id": "3HypDqXt6tHieMDN7hWYCh", "model_id": "gpt-3.5-turbo:20230327", "question_id": 40, "text": "It's important to strike a balance between job creation and technological progress. Technological progress can bring about many benefits such as increased efficiency, convenience, and innovation. However, it can also result in job displacement or in certain cases, re-skilling for employees. It's important for companies to recognize the impact of their technological advancements and to consider implementing measures to support their employees through the transition.\n\nOn the other hand, job creation is also important as it supports a healthy economy and ensures that individuals have the opportunity to support themselves and their families. Encouraging entrepreneurship, creating incentives for businesses, and investing in education and job training programs can all be effective methods for promoting job creation.\n\nOverall, the key is to recognize the importance of both job creation and technological progress and to strive for a balance that benefits both individuals and society as a whole.", "metadata": {}}
+{"answer_id": "DmQtupeyNDrQFBccBRAsbD", "model_id": "gpt-3.5-turbo:20230327", "question_id": 41, "text": "The average human blinks approximately 15-20 times per minute. Let's assume that the average waking hour of a person is around 16 hours per day, which means that they blink around 15 x 16 = 240 times per day.\n\nIn one year, this would translate to 240 x 365 = 87,600 blinks per year.\n\nAssuming a lifespan of 75 years, the total number of blinks would be 87,600 x 75 = 6,570,000.\n\nSo, on average, a human blinks about 6.6 million times in their lifetime.\n\nIt's important to note that this is just an estimate based on the assumptions that were made, and not everyone's blinking frequency is the same. Factors such as age, health, and environment can affect how often someone blinks.", "metadata": {}}
+{"answer_id": "froHv7kwRMYGWPXDQXk2Gw", "model_id": "gpt-3.5-turbo:20230327", "question_id": 42, "text": "A grain of salt is a very small amount of salt, so it is difficult to visualize how many atoms are in it. However, we can approximate the number of atoms in a grain of salt by using some scientific formulas and data.\n\nFirstly, we need to know the mass of a grain of salt. According to the National Institute of Standards and Technology (NIST), the average weight of a single grain of salt is about 58.5 milligrams or 0.0585 grams.\n\nSecondly, we need to know the molar mass of salt. Salt is actually a compound made up of two elements, sodium (Na) and chlorine (Cl), with the chemical formula NaCl. The molar mass of NaCl is 58.44 grams per mole, which means that one mole of salt contains 6.022 x 10^23 individual NaCl molecules.\n\nTo find out how many atoms are in a single grain of salt, we need to use Avogadro's number, which is the number of particles in one mole of a substance. Avogadro's number is approximately 6.022 x 10^23 atoms/molecules per mole.\n\nSo, if we take the mass of a single grain of salt (0.0585 grams) and divide it by the molar mass of NaCl (58.44 grams/mol), we can calculate the number of moles of salt in a grain:\n\n0.0585 g / 58.44 g/mol = 0.000999 moles\n\nThen, if we multiply this by Avogadro's number, we get the number of molecules (or atoms) of NaCl in a single grain of salt:\n\n0.000999 moles x 6.022 x 10^23 molecules/mole = 6.008 x 10^20 molecules\n\nTherefore, there are approximately 6 x 10^20 atoms of salt in a single grain of salt.\n\nIt's important to note that this is an approximation, and the actual number of atoms in a grain of salt may vary slightly depending on the size of the grain and the purity of the salt. Nevertheless, this calculation gives us a general idea of the incredible number of atoms that make up even the smallest visible piece of matter.", "metadata": {}}
+{"answer_id": "ahktv9NqxZ2cYquTXwF42r", "model_id": "gpt-3.5-turbo:20230327", "question_id": 43, "text": "According to research, it is estimated that there are approximately 8.6 million lightning strikes that occur on Earth each day. Here is how I reached this conclusion:\n\nFirst, it is important to understand what creates lightning. Lightning is produced in thunderstorms when the atmosphere becomes electrically charged. This is typically caused by the interaction between warm and cold air. When these two air masses meet and create instability in the atmosphere, it can cause lightning to occur.\n\nNext, it's worth noting that thunderstorms are quite common across the globe, with an estimated 16 million thunderstorms happening each year. This means that there is a significant potential for lightning strikes to occur during these storms.\n\nTo estimate the number of lightning strikes that occur each day, we can take the total number of yearly thunderstorms (16 million) and divide it by the number of days in a year (365). This calculation gives us an average of around 43,835 thunderstorms per day.\n\nNow, not all thunderstorms will produce lightning, but many of them do. Based on data collected from a study conducted by the World Meteorological Organization (WMO), about 45% of all thunderstorms produce lightning. So, if we multiply the number of thunderstorms per day (43,835) by the percentage that produce lightning (45%), we get approximately 19,726 thunderstorms with lightning each day.\n\nFinally, we need to estimate how many strikes occur within each thunderstorm. It is difficult to determine an exact number since some thunderstorms produce only a few strikes while others produce hundreds or even thousands. However, according to National Geographic, the average number of lightning strikes per thunderstorm is around 100. Multiplying this by the estimated number of thunderstorms per day that produce lightning (19,726) gives us approximately 1,972,600 lightning strikes per day.\n\nThat being said, it is important to note that some estimates put the number of daily lightning strikes anywhere from 6 million to 100 million. The exact number varies depending on the source and the methods used to calculate it. Nonetheless, it is clear that lightning strikes occur frequently and can be quite dangerous.", "metadata": {}}
+{"answer_id": "kqqPRaFqb3w9Ky9LGB3yKU", "model_id": "gpt-3.5-turbo:20230327", "question_id": 44, "text": "To calculate the number of balloons needed to lift a house like in the movie \"Up,\" we need to consider the weight of the house and the lifting capacity of a single balloon.\n\nFirst, let us consider the weight of the house. In the movie, the house is depicted as a small wooden house with a chimney and a porch. Based on similar houses, we can estimate that the weight of the house would be around 4-5 tons (8,000-10,000 pounds).\n\nNow, let us consider the lifting capacity of a single balloon. A typical helium-filled party balloon can lift a maximum of approximately 14 grams (0.5 ounces) of weight. However, in the movie, the balloons were much larger and had a greater lifting capacity. Assuming a lifting capacity of 1 kilogram (2.2 pounds) per balloon, we can calculate the number of balloons needed to lift the house.\n\nTo lift a 5-ton house, we would need 5,000 kilograms (11,000 pounds) of lifting capacity. Dividing this by the lifting capacity of a single balloon (1 kilogram), we get a total of 5,000 balloons needed to lift the house.\n\nHowever, this calculation does not take into account the weight of the balloon material, ropes, and other equipment needed to tether the balloons to the house. Additionally, the balloons would need to be organized and arranged in a specific way to lift the house evenly and prevent it from tipping over.\n\nTherefore, while 5,000 balloons may be needed in theory, it is unlikely that this number of balloons alone would be able to safely lift a house. The scenario depicted in the movie is purely fictional and should not be duplicated in real life.", "metadata": {}}
+{"answer_id": "946tQg8kS7GYPSm4qcV6Pt", "model_id": "gpt-3.5-turbo:20230327", "question_id": 45, "text": "According to the latest statistics, more than 18.7 billion text messages are sent globally every day. This means that approximately 781 million text messages are sent every hour. To calculate how many text messages are sent in a minute, we need to divide these numbers by 60 (the number of minutes in an hour).\n\nSo, 781 million text messages divided by 60 minutes yields approximately 13,017,543 text messages sent globally in a minute. It's important to note that this number is an estimate and may vary depending on the time of day, day of the week, and other factors. Additionally, the rapid growth of instant messaging apps may be impacting the number of traditional text messages being sent globally.", "metadata": {}}
+{"answer_id": "cU3wut3Ta3ySbRHGxfwgjc", "model_id": "gpt-3.5-turbo:20230327", "question_id": 46, "text": "Estimating the number of words spoken daily on Earth can be a daunting task, as it involves taking into account a wide range of factors such as population, language, age, location, and more. However, there are a few ways to approach this question that can give us a rough estimate. \n\nOne approach is to start by looking at the world population, which as of 2021 is approximately 7.9 billion people. Assuming that each person speaks on average 2,000 words a day (which is a reasonable estimate for adults who engage in regular conversation), we can begin to calculate the daily volume of words spoken on Earth. \n\nTo do this, we can use the following formula: \n\nTotal words spoken daily = (World population) x (Average daily words spoken per person) \n\nPlugging in the values, we get: \n\nTotal words spoken daily = (7.9 billion) x (2,000) \n\nTotal words spoken daily = 15.8 trillion \n\nSo according to this estimate, there are roughly 15.8 trillion words spoken on Earth each day. However, it's worth noting that this number may be significantly higher or lower depending on a variety of factors. For example, not everyone speaks the same number of words per day, and some people may not have access to language or communication tools.\n\nAdditionally, it's important to keep in mind that this estimate is just that - an estimate. There is no definitive answer to how many words are spoken daily on Earth, and the true number is likely impossible to determine with complete accuracy. Nonetheless, by taking a logical approach and making reasonable assumptions, we can arrive at a rough estimate that gives us a sense of the massive scale of human communication.", "metadata": {}}
+{"answer_id": "hQP784Ch2yq2b3BaXVBVX3", "model_id": "gpt-3.5-turbo:20230327", "question_id": 47, "text": "It's difficult to determine the exact number of snowflakes that fall during a typical winter, as it depends on a variety of factors such as location, temperature, humidity, and duration of the winter season.\n\nFirstly, it's important to consider the size of snowflakes. Snowflakes come in different shapes and sizes, ranging from small crystals to larger, more intricate structures. Generally, the smaller the snowflake, the more there are in a given amount of precipitation.\n\nSecondly, it's also worth noting that snowfall is typically measured in inches, rather than the number of snowflakes. This is because the exact number of snowflakes that contribute to a certain amount of precipitation can vary depending on the size and shape of the snowflakes.\n\nWith these factors in mind, we can estimate the number of snowflakes that fall during a typical winter by looking at an average snowfall amount. According to the National Oceanic and Atmospheric Administration (NOAA), the average snowfall in the United States is around 28 inches per year.\n\nAssuming that each inch of snowfall contains anywhere from hundreds to thousands of individual snowflakes, we can estimate that there are millions (if not billions) of snowflakes that fall during a typical winter.\n\nOverall, while we can't pinpoint an exact number of snowflakes that fall during a typical winter, we can estimate that it is likely to be a very large number due to the variability in size and shape of snowflakes, as well as the amount of snowfall that typically occurs.", "metadata": {}}
+{"answer_id": "a92bStUFdq4LBcv3pa9y3Z", "model_id": "gpt-3.5-turbo:20230327", "question_id": 48, "text": "Determining the total number of pages in all the books ever written would be an impossible task as it's practically impossible to know the exact number of books that have been written throughout human history. However, we can make an attempt to estimate the total number of pages based on some assumptions and facts.\n\nFirst, we need to estimate the total number of books ever written. According to Google's estimation in 2010, there were around 130 million books in the world at that time. However, this is just an educated guess, and the actual number of books is likely to be much higher.\n\nNext, we need to consider the average number of pages in a book. This can vary widely depending on the genre, audience, and author's writing style. Some books may have only a few pages, while others may have hundreds or even thousands of pages. However, according to Book Authority, the average number of pages in a book is around 320 pages.\n\nWith these assumptions, we can estimate the total number of pages in all the books ever written by multiplying the estimated number of books with the average number of pages. So, if we assume there were around 500 million books ever written and each book had 320 pages on average, then the total number of pages in all books ever written would be:\n\n500,000,000 x 320 = 160 billion pages\n\nHowever, it's important to recognize that this is just an estimation and the actual number is likely much higher or lower. Additionally, new books are being written and published every day, so this estimation would become outdated very quickly.", "metadata": {}}
+{"answer_id": "a2QAcAm9wJeP2BpyWQnhot", "model_id": "gpt-3.5-turbo:20230327", "question_id": 49, "text": "The beginning of life on Earth is estimated to have occurred around 3.8 billion years ago. The question asks how many times the Earth has orbited the Sun since that time, so we need to calculate the number of orbits the Earth has completed over that time period. \n\nOne orbit of the Earth around the Sun takes approximately 365.25 days, or one year. To calculate how many orbits the Earth has completed over 3.8 billion years, we first need to convert this amount of time into years. \n\n3.8 billion years is equivalent to 3,800,000,000 years. We can use this number and the length of one Earth year to calculate the number of orbits:\n\n3,800,000,000 years / 1 year = 3,800,000,000 orbits\n\nSo, the Earth has completed approximately 3.8 billion orbits around the Sun since the beginning of life on Earth. \n\nIt's worth noting that this is a rough estimate, as the length of a year and the Earth's orbit have both changed slightly over time due to factors like gravitational interactions with other planets and the Moon. However, this answer provides a reasonable approximation of the number of orbits that have occurred over the scale of billions of years.", "metadata": {}}
+{"answer_id": "CrmusnxbTtGXF2varfcUd4", "model_id": "gpt-3.5-turbo:20230327", "question_id": 50, "text": "It is impossible to determine the exact number of songs that have been recorded throughout history because new songs are being created on a daily basis, and many ancient songs have been lost over time. However, we can make some educated estimates based on available data.\n\nFirst, we must consider the length of human history. Humans have been creating music for tens of thousands of years, with the oldest known musical instrument, a bone flute, dating back to about 40,000 years ago. As such, it is safe to assume that countless songs have been created and lost over that vast length of time.\n\nNext, let's consider modern recording technology. The first sound recording was made in 1857, and since then, millions of songs have been recorded. In fact, the Library of Congress in the USA has more than 3 million sound recordings in its collection alone.\n\nHowever, even that number is likely to be a vast underestimate since it\u2019s impossible to account for music recordings that existed in cultures without a writing system, or those outside the major recording and distribution systems of the Western music industry.\n\nFinally, we must recognize that music is created in every culture around the world, and each culture has its unique musical traditions. As a result, it's impossible to estimate the number of songs created around the globe in different languages and styles.\n\nIn conclusion, although it's impossible to determine the exact number of songs recorded throughout history, we can conclusively say that it's likely in the billions, considering the long history of music, the vast amount of music recorded in recent centuries and the diversity of musical traditions around the world.", "metadata": {}}
+{"answer_id": "J9pZp6z2UUW7YcXgzUouqs", "model_id": "gpt-3.5-turbo:20230327", "question_id": 51, "text": "If the internet had been invented during the Renaissance period, it would have revolutionized the way in which people gained knowledge, conducted research, and shared ideas. However, there would have likely been many limitations of the technology, such as the lack of accessibility and the lack of infrastructure to support such a vast network.\n\nOne possibility is that the internet may have been utilized by Renaissance scholars for intellectual exchange, much like how they used letters to communicate and engage in debates. This would have allowed for a more extensive and rapid dissemination of ideas, making it easier for artists, philosophers, theologians, and scientists to learn from one another and work collaboratively.\n\nAdditionally, the invention of the printing press during the Renaissance period made books and information available to a broader audience. If the internet had also been invented during this time, it may have played a similar role in democratizing knowledge and making it more widely accessible.\n\nOverall, the invention of the internet during the Renaissance period would have had a profound impact on the intellectual and cultural landscape of the time, but the technology may have been limited in scope and reach given the technological limitations of the era.", "metadata": {}}
+{"answer_id": "67bYUQb6zru8ofiub7uNUi", "model_id": "gpt-3.5-turbo:20230327", "question_id": 52, "text": "If the Aztecs had successfully repelled the Spanish conquistadors, the history of the Americas would have been different. The Spanish colonization of Mexico would not have taken place, and the Aztecs would have continued to control their empire. It is possible that the Aztecs would have continued to expand their empire and develop their civilization. \n\nThe Spanish conquest of Mexico had a significant impact not only on the Aztec people, but also on the indigenous people of the Americas. The conquest brought with it diseases that decimated the population, and the Spanish imposed their culture and religion on the peoples they conquered. If the Spanish had been repelled, the histories and cultures of the Americas would have been vastly different.\n\nIt is also possible that other European nations, such as the British or the French, would have attempted to colonize the Americas. However, without the riches of the Aztec empire to lure them in, the colonial powers may not have found the Americas as attractive. \n\nOverall, the successful repulsion of the Spanish conquistadors by the Aztecs would have changed the course of history for the Americas, Europe, and the world.", "metadata": {}}
+{"answer_id": "gAisnQTHWFLW8aa5fQPNJf", "model_id": "gpt-3.5-turbo:20230327", "question_id": 53, "text": "If the Black Death had not occurred in the 14th century, the course of history would have drastically changed. The Black Death was one of the deadliest pandemics in human history, which killed almost 50 million people in Europe alone. Its social, economic, and political impact was enormous, and it changed the entire social structure of Europe forever.\n\nIf the Black Death had not occurred, many of the changes that were brought about by the pandemic would not have occurred. For example, the social hierarchy would have remained the same, and peasants would not have had as many economic opportunities as they did after the Black Death, which caused a shortage in labor.\n\nFurthermore, the medical field would not have progressed as quickly, since the Black Death brought about many advancements in medicine as people attempted to find a cure. Many of the basic hygiene practices that we take for granted today were also developed in response to the Black Death, so if the pandemic had not occurred, the general health and cleanliness of the population would not have improved as much.\n\nFinally, the cultural landscape would have been different as well, as the Black Death had a profound effect on art, literature, and music. If it had not occurred, we may have different artistic and cultural movements today.\n\nIn summary, if the Black Death had not occurred in the 14th century, the world would be drastically different today.", "metadata": {}}
+{"answer_id": "4ZJCbj7T8BGzNhDqz7NSF4", "model_id": "gpt-3.5-turbo:20230327", "question_id": 54, "text": "If Isaac Newton had focused on biology instead of physics, the scientific world may be vastly different today. His advancements in mathematics, such as calculus, would have certainly been useful in the field of biology. Newton's curiosity, analytical mind, and dedication to scientific inquiry would have been equally effective in biology as they were in physics. He may have made significant contributions to the field of genetics or evolutionary theory.\n\nOn the other hand, his contributions to physics were so groundbreaking that the subject may have been hindered without his contributions. It's difficult to say exactly what would have happened, but it's likely that the course of scientific history would have been different.", "metadata": {}}
+{"answer_id": "c6ixri3qqLfSBBnwMkgYB7", "model_id": "gpt-3.5-turbo:20230327", "question_id": 55, "text": "If the Beatles had never formed as a band, the world of music would be very different. The Beatles revolutionized popular music and influenced countless musicians and bands that came after them. Without the Beatles, it's possible that other bands would still have risen to fame and changed the course of music history, but we can only speculate how their absence would have affected the genre overall. It's also possible that a different band or artist would have taken the world by storm and become an equally important cultural icon. Ultimately, the impact of the Beatles on music and society is immeasurable, and their influence will continue to be felt for generations to come.", "metadata": {}}
+{"answer_id": "c9AtDn7eeSYhtH854MQDDB", "model_id": "gpt-3.5-turbo:20230327", "question_id": 56, "text": "If Alan Turing had not cracked the Enigma code during World War II, the outcome of the war could have been very different. The Enigma code was a secret code used by the German military to encrypt their messages during the war. By cracking this code, Turing and his team were able to intercept and decode secret German messages, thus giving the Allies valuable intelligence that helped them win key battles.\n\nIf Turing had not been able to crack the Enigma code, the Allies might have struggled to gain the intelligence they needed to win key battles, which could have led to a longer and more bloody war. It's possible that the war could have dragged on for many more years, resulting in even more casualties and devastation.\n\nAdditionally, Turing's work on the Enigma code laid the foundation for modern computing and artificial intelligence. Without his insights and innovations, the development of computer technology might have been delayed, which could have had significant implications for technological progress and innovation in the aftermath of the war.", "metadata": {}}
+{"answer_id": "jYd2gg6MJH8hdqFSAJTaiR", "model_id": "gpt-3.5-turbo:20230327", "question_id": 57, "text": "If the Suez Canal had never been constructed, the shipping routes between Europe and Asia would have been significantly longer and more treacherous. Ships would have to sail around the southern tip of Africa, adding thousands of miles to their journey and increasing the risk of piracy and other hazards. This would have made international trade more expensive and less efficient, and could have slowed the economic growth of many nations. Additionally, the region around the canal would have developed differently, as the canal has helped create cities and infrastructure that rely on its presence. Without the canal, the social, political, and economic impact on the Middle East could have been much different.", "metadata": {}}
+{"answer_id": "nZJ6LGJFegnHetutiAQtFm", "model_id": "gpt-3.5-turbo:20230327", "question_id": 58, "text": "If the Maya civilization had never collapsed, it's possible that they would have continued flourishing and developing their culture, art, architecture, and knowledge. The Maya civilization was known for impressive works of art, architecture, and a sophisticated calendar system. \n\nWithout their collapse, it is possible that the Maya could have advanced their knowledge in other areas, such as science, language, or technology. They might have developed new forms of governance and political systems, which could have influenced the political structures of neighboring civilizations.\n\nAdditionally, their continued presence could have had an impact on the colonization of the Americas. Because the Maya were one of the more advanced civilizations in the region at the time of the European invasion, their presence could have impacted the way the Spaniards and other European powers approached colonization.\n\nOverall, it's hard to say exactly what would have happened, but the Maya civilization was undeniably a significant cultural and scientific force in history. Had they not mysteriously disappeared, they could have continued to shape the course of history in ways we can only imagine.", "metadata": {}}
+{"answer_id": "dmEgLyeYNcwBZWHBak6Lap", "model_id": "gpt-3.5-turbo:20230327", "question_id": 59, "text": "If Christopher Columbus had not discovered the Americas, it is likely that another explorer would have eventually stumbled upon the continent. The Vikings had already established a presence in North America around 500 years before Columbus arrived. Additionally, it is possible that indigenous peoples from Asia may have eventually migrated to the Americas via the Bering Strait land bridge.\n\nHowever, the timing of European colonization and the impact it had on the native populations may have been dramatically different without Columbus\u2019 arrival. It is possible that the Americas could have remained isolated from European influence for a longer period of time, allowing the native cultures to develop more independently. Alternatively, other European nations may have eventually colonized the Americas without the influence of Columbus and brought their own unique cultures and traditions to the region.", "metadata": {}}
+{"answer_id": "bkuECkuy7YiDUcj9oJjdrZ", "model_id": "gpt-3.5-turbo:20230327", "question_id": 60, "text": "If Vincent van Gogh had been a successful artist during his lifetime, his life would have been very different. He struggled with poverty, mental illness, and feelings of isolation throughout his life. If he had been financially successful, he would not have had to rely on his brother Theo for financial support and may have been able to afford better care for his mental health.\n\nAdditionally, van Gogh was deeply committed to his art and was mostly ignored by the art world during his lifetime. If he had been successful, he may have found greater satisfaction in his work and felt more validated for his contributions to the art world. He may have had the opportunity to travel more widely, meet more artists, and produce more work.\n\nOn a broader level, if Vincent van Gogh had been successful during his lifetime, it may have changed the way we think about art history. His work was groundbreaking in its use of color, brushwork, and emotional intensity, and it's possible that he could have influenced other artists of his time (and beyond) in more direct ways if he had been a well-known figure in the art world.", "metadata": {}}
+{"answer_id": "2PRjT7j3V6487xZREfQfuD", "model_id": "gpt-3.5-turbo:20230327", "question_id": 61, "text": "Here's a sample program that demonstrates your requirements:\n\n```c++\n#include \n#include \n#include \n#include \n#include \n\nusing namespace std;\n\nint count_occurrences(const string &filename, const string &search_word) {\n int count = 0;\n ifstream fin(filename.c_str());\n if (fin.fail()) {\n cerr << \"Failed to open file \" << filename << endl;\n return 0;\n }\n string line;\n while (getline(fin, line)) {\n transform(line.begin(), line.end(), line.begin(), [](unsigned char c){ return tolower(c); }); // convert to lowercase\n stringstream s(line);\n string word;\n while (s >> word) {\n if (word == search_word) {\n count++;\n }\n }\n }\n fin.close();\n return count;\n}\n\nint main() {\n string filename = \"sample.txt\";\n string search_word = \"hello\";\n int count = count_occurrences(filename, search_word);\n cout << \"The word \\\"\" << search_word << \"\\\" occurs \" << count << \" times in file \" << filename << endl;\n return 0;\n}\n```\n\nHere's how it works:\n\n1. We define a function called `count_occurrences()` that takes two arguments: `filename` and `search_word`.\n2. Inside the `count_occurrences()` function, we first open the file using `ifstream`.\n\n3. If the file fails to open, we simply return 0.\n\n4. We then loop through each line of the file using `getline()` and store it in the variable `line`.\n\n5. We convert each line to lowercase using `transform()` and a lambda function.\n\n6. We then create a `stringstream` object called `s` and pass the line to it.\n\n7. We then loop through each word in the line using `>>` and store it in the variable `word`.\n\n8. If the `word` matches the `search_word`, we increment the `count` variable.\n\n9. After the loop is complete, we close the file using `close()` and return the `count`.\n\n10. In the `main()` function, we call the `count_occurrences()` function with the file name and search word.\n\n11. We then output the result to the console.\n\nI hope this helps! Let me know if you have any further questions.", "metadata": {}}
+{"answer_id": "jWyN8NTdVix6CUoqfbRqVx", "model_id": "gpt-3.5-turbo:20230327", "question_id": 62, "text": "Sure! Here's a Python implementation using dynamic programming.\n\n```python\ndef lcs(str1, str2):\n # Initialize a 2D array to store the lengths of LCS\n m = [[0] * (len(str2) + 1) for i in range(len(str1) + 1)]\n \n # Fill the 2D array by comparing characters of both strings\n for i in range(1, len(str1) + 1):\n for j in range(1, len(str2) + 1):\n if str1[i-1] == str2[j-1]:\n m[i][j] = m[i-1][j-1] + 1\n else:\n m[i][j] = max(m[i-1][j], m[i][j-1])\n \n # Construct the LCS by backtracking the 2D array\n lcs = \"\"\n i, j = len(str1), len(str2)\n while i > 0 and j > 0:\n if str1[i-1] == str2[j-1]:\n lcs = str1[i-1] + lcs\n i -= 1\n j -= 1\n elif m[i-1][j] > m[i][j-1]:\n i -= 1\n else:\n j -= 1\n \n return lcs\n```\n\nThis function takes two input strings as parameters and returns the longest common subsequence as output. It first initializes a 2D array 'm' with all elements set to zero. This array is used to store the lengths of the longest common subsequences of all possible substrings of the input strings. The function then fills up the 2D array by comparing characters of both strings.\n\nAfter the 2D array is filled, the function backtracks it to construct the longest common subsequence. It starts from the bottom right corner of the array and moves up and left until it reaches the top left corner. During this process, it compares the current character of both strings and moves to the next cell in a way that maximizes the length of the LCS. Once the top left corner is reached, the constructed subsequence is returned as output.", "metadata": {}}
+{"answer_id": "mx9G7gfKTCXCmNRaiMZQr9", "model_id": "gpt-3.5-turbo:20230327", "question_id": 63, "text": "Here's a regular expression pattern in Python that can be used to validate an email address:\n\n```\nimport re\n\npattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$'\n\ndef validate_email(email):\n if re.match(pattern, email):\n print(\"Valid email address\")\n else:\n print(\"Invalid email address\")\n \nvalidate_email(\"example@example.com\")\n```\n\nThis regular expression pattern checks if the email address has a valid format. It ensures that the email address starts with one or more alphanumeric characters, dots, underscores, percentage signs, plus or minus signs. The \"@' character follows this which is then followed by a valid domain name that contains only alphanumeric characters, dots, and hyphens, ending with a valid top-level domain name that contains only alphabetical characters with at least two characters.", "metadata": {}}
+{"answer_id": "8LjpXk6Va5L2FBEwTU9YJ6", "model_id": "gpt-3.5-turbo:20230327", "question_id": 64, "text": "Certainly! Here's an example Python program using dynamic programming to find the nth Fibonacci number:\n\n```\ndef fibonacci(n):\n fib = [0, 1]\n for i in range(2, n + 1):\n fib.append(fib[i - 1] + fib[i - 2])\n return fib[n]\n\n# Example usage:\nprint(fibonacci(5)) # 5\nprint(fibonacci(10)) # 55\n```\n\nThe function `fibonacci(n)` takes an integer argument `n` and returns the nth Fibonacci number. The program uses dynamic programming by keeping track of the previous two Fibonacci numbers in a list called `fib`.\n\nThe program initializes `fib` to `[0, 1]`, which are the first two Fibonacci numbers. Then, it iterates from 2 to `n` and calculates each Fibonacci number as the sum of the previous two numbers. Finally, the function returns the nth Fibonacci number in the list `fib`.\n\nHope this helps! Let me know if you have any questions.", "metadata": {}}
+{"answer_id": "BLC8fhAUeahFfrjBRiL9tx", "model_id": "gpt-3.5-turbo:20230327", "question_id": 65, "text": "Sure! Here is an implementation of the binary search algorithm in Python:\n\n```\ndef binary_search(arr, target):\n left = 0\n right = len(arr) - 1\n while left <= right:\n mid = (left + right) // 2\n if arr[mid] == target:\n return mid\n elif arr[mid] < target:\n left = mid + 1\n else:\n right = mid - 1\n return -1\n```\n\nTo use this function, pass in a sorted array and the target element you are searching for. The function returns the index of the target element in the array (if it exists), or -1 if it does not. \n\nHere is an example usage:\n\n```\narr = [1, 2, 3, 4, 5, 6, 7, 8, 9]\ntarget = 5\n\nindex = binary_search(arr, target)\n\nif index != -1:\n print(f\"Found {target} at index {index}\")\nelse:\n print(f\"{target} not found in the array\")\n```\n\nThis would output \"Found 5 at index 4\".", "metadata": {}}
+{"answer_id": "NAH245JGvMWF24FWZJagYp", "model_id": "gpt-3.5-turbo:20230327", "question_id": 66, "text": "Here's an example implementation of a queue using two stacks in Python:\n\n```python\nclass QueueUsingStacks:\n def __init__(self):\n self.stack1 = []\n self.stack2 = []\n\n def enqueue(self, value):\n # Push the element onto the first stack\n self.stack1.append(value)\n\n def dequeue(self):\n if not self.stack2:\n # If the second stack is empty, transfer the elements from the first stack to the second stack\n while self.stack1:\n self.stack2.append(self.stack1.pop())\n\n if not self.stack2:\n # If both stacks are empty, return None\n return None\n else:\n # Pop the top element from the second stack and return it\n return self.stack2.pop()\n\n def is_empty(self):\n return not bool(self.stack1 or self.stack2)\n\n def size(self):\n return len(self.stack1) + len(self.stack2)\n```\n\nTo use this implementation, simply create a new instance of the `QueueUsingStacks` class and call the appropriate methods as shown below:\n\n```python\n# Create a new queue using two stacks\nq = QueueUsingStacks()\n\n# Enqueue some elements onto the queue\nq.enqueue(10)\nq.enqueue(20)\nq.enqueue(30)\n\n# Dequeue an element from the queue\nx = q.dequeue()\nprint(x) # Output: 10\n\n# Check if the queue is empty\nprint(q.is_empty()) # Output: False\n\n# Get the size of the queue\nprint(q.size()) # Output: 2\n```", "metadata": {}}
+{"answer_id": "NKXTwB3WtfFcmQZbTxBP5o", "model_id": "gpt-3.5-turbo:20230327", "question_id": 67, "text": "Here's a code snippet that can find the common elements in two arrays:\n\n```python\ndef find_common_elements(arr1, arr2):\n common = []\n for i in range(len(arr1)):\n for j in range(len(arr2)):\n if arr1[i] == arr2[j]:\n if arr1[i] not in common:\n common.append(arr1[i])\n return common\n```\n\nIn this code, we use two nested loops to iterate over both arrays and compare their elements. If a common element is found, we add it to a list called 'common'. To avoid duplicates, we check whether the element is already in the 'common' list before appending it.\n\nWe can test this function with some example arrays:\n\n```python\narr1 = [1, 2, 3, 4, 5]\narr2 = [3, 4, 5, 6, 7]\nresult = find_common_elements(arr1, arr2)\nprint(result) # Output: [3, 4, 5]\n```\n\nIn this example, the common elements are 3, 4, and 5.", "metadata": {}}
+{"answer_id": "5P7wPyrNwBrHNdJNrETS5h", "model_id": "gpt-3.5-turbo:20230327", "question_id": 68, "text": "To find the value of f(2), we simply substitute 2 in place of x in the given equation of f(x):\n\nf(2) = 5(2)^3 - 2(2) + 3\n\nf(2) = 5(8) - 4 + 3\n\nf(2) = 40 - 1\n\nTherefore, the value of f(2) is 39.", "metadata": {}}
+{"answer_id": "UmnL4WoucBduFpX4jptkNU", "model_id": "gpt-3.5-turbo:20230327", "question_id": 69, "text": "3x + 10 = 5(x - 2)\n\n3x + 10 = 5x - 10 (use the distributive property)\n\n10 + 10 = 5x - 3x (subtract 3x from both sides)\n\n20 = 2x (simplify)\n\nx = 10 (divide both sides by 2) \n\nTherefore, x = 10.", "metadata": {}}
+{"answer_id": "Hymm673GkY5tXNaTFeNUBZ", "model_id": "gpt-3.5-turbo:20230327", "question_id": 70, "text": "To find the length of a line segment with two endpoints (x1, y1) and (x2, y2), we use the distance formula:\n\ndistance = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nPlugging in the coordinates of the given endpoints, we get:\n\ndistance = sqrt((10 - 2)^2 + (4 - (-2))^2)\ndistance = sqrt(8^2 + 6^2)\ndistance = sqrt(64 + 36)\ndistance = sqrt(100)\ndistance = 10\n\nTherefore, the length of the line segment is 10 units.", "metadata": {}}
+{"answer_id": "PUzddJ35E3WsM7BDQ9E59i", "model_id": "gpt-3.5-turbo:20230327", "question_id": 71, "text": "Sure, I can help you write a formal email to a potential business partner proposing a joint venture. Here's a sample email you can use as a guide:\n\nDear [Name],\n\nI hope this email finds you well. I am writing to you today to propose a joint venture that I believe could be mutually beneficial for our businesses.\n\nAs you may know, our companies share many complementary strengths and resources that could be leveraged to achieve greater success in our respective markets. I believe that by working together, we could create a competitive advantage that would benefit both of us.\n\nI would like to propose that we explore the possibility of forming a joint venture in [industry or field], with the aim of combining our expertise and resources to create something innovative and valuable for the market.\n\nI believe that our companies' experiences and capabilities could form a strong foundation for a mutually beneficial partnership. If you are interested in pursuing this proposal, please let me know and we can discuss the details further.\n\nThank you for your time and consideration, and I look forward to hearing back from you.\n\nBest regards,\n\n[Your Name]", "metadata": {}}
+{"answer_id": "6Q72hZCtDkgq379yhdmN5N", "model_id": "gpt-3.5-turbo:20230327", "question_id": 72, "text": "Dear [Employer\u2019s Name],\n\nThis letter serves as my formal resignation from my position as [Position Name] at [Company Name]. My last day of work will be on [Date].\n\nI would like to express my gratitude for the opportunities provided during my time at [Company Name]. It has been a pleasure to be part of such a dynamic and forward-thinking organization. My tenure here has contributed significantly to my professional growth, and it is with a heavy heart that I must resign.\n\nI appreciate your constant support, guidance and collaboration throughout my employment here. I have learned a lot under your leadership and found my experience here to be fulfilling in ways that I could not have anticipated.\n\nIf there is anything that I can do to ensure a seamless transition for my successor and facilitate the smooth conclusion of my work, please don't hesitate to let me know. I am happy to complete any outstanding tasks or projects before I leave.\n\nThank you again for all of your support and understanding. I wish the company continued success, and I hope that our paths may cross again in the future.\n\nSincerely,\n\n[Your Name]", "metadata": {}}
+{"answer_id": "ReXnHy9C8SwcYPAep6gvJg", "model_id": "gpt-3.5-turbo:20230327", "question_id": 73, "text": "[Your Name]\n[Your Designation]\n[Your Institution]\n[Your Address]\n[City, State Zip Code]\n[Your Email]\n[Date]\n\n[Admission Committee]\n[Graduate Program in Computer Science]\n[University Name]\n[Address]\n[City, State ZIP Code]\n\nDear Members of the Admission Committee,\n\nI am writing to recommend [Student's Name] for admission to the graduate program in Computer Science at your esteemed institution. I have had the privilege to teach [him/her] in several courses during [his/her] undergraduate studies in [program name] at [university name], and I highly recommend [him/her] for [his/her] outstanding academic performance and natural ability in the field of computer science.\n\nDuring [his/her] time at [university name], [Student's Name] proved to be a highly motivated and bright student, consistently demonstrating academic excellence in all courses. [He/She] consistently achieved top grades and took on challenging coursework with enthusiasm and diligence. [His/Her] strong analytical and critical thinking skills, coupled with [his/her] passion for technology and computer science, make [him/her] a perfect candidate for the esteemed computer science program at your institution.\n\n[Student's Name] was not only a dedicated student but also an active participant in many extra-curricular activities, such as [list activities]. [He/She] was always eager to explore new concepts and technologies beyond the coursework requirements, and [his/her] enthusiasm to learn and work in a team was evident in [his/her] participation in group projects and research work.\n\nIn conclusion, I have a strong belief that [Student's name] has an incredible potential for achieving academic excellence in your prestigious program. [His/Her] exceptional academic record, strong work ethic, and impressive achievements make [him/her] an ideal candidate for the graduate program in Computer Science at your institution. I, therefore, strongly recommend [Student's Name] for admission to the program and look forward to seeing [him/her] contributing to the field of computer science significantly.\n\nPlease do not hesitate to contact me if you require any further information regarding this recommendation.\n\nSincerely,\n\n[Your Name]", "metadata": {}}
+{"answer_id": "cKk5zZe8yYY4JH3kr5pGXG", "model_id": "gpt-3.5-turbo:20230327", "question_id": 74, "text": "Subject Line: Introducing our revolutionary new software solution\n\nDear valued customers,\n\nWe are thrilled to announce the launch of our latest innovation \u2013 a pioneering software solution that will revolutionize the way you do business. Designed with cutting-edge technology and user-friendly features, our software solution will help you streamline your workflows, increase efficiency, and ultimately boost your bottom line.\n\nHere are just a few of the ways our software solution can transform your business:\n\n- Increased collaboration: Our software solution makes it easy for your team to collaborate on tasks and projects, ensuring everyone is on the same page and nothing falls through the cracks.\n\n- Real-time tracking: With our software solution, you can track progress in real time, enabling you to make informed decisions quickly and efficiently.\n\n- Comprehensive reporting: Our software solution allows you to run detailed reports that provide valuable insights into your business operations, giving you the information you need to make informed decisions and identify areas of improvement.\n\nThese are just a handful of the benefits you can expect from our new software solution. We truly believe that our product will be a game changer for businesses of all sizes and industries.\n\nTo celebrate our launch, we are offering a special introductory offer exclusively for our customers. For a limited time, you can get access to our software solution at an incredible discounted price. Don\u2019t miss out on this amazing opportunity to take your business to the next level!\n\nTo learn more about our software solution and to take advantage of our special offer, simply click the link below:\n\n[Insert Link]\n\nThank you for choosing us as your partner in business. We look forward to helping you succeed with our new software solution.\n\nBest regards,\n\n[Your Name]", "metadata": {}}
+{"answer_id": "c5rwA3cPjytSGcn7H8dZ6Q", "model_id": "gpt-3.5-turbo:20230327", "question_id": 75, "text": "Subject: Apology for Delay in Your Order\n\nDear [Customer Name],\n\nWe are deeply sorry to inform you that there was a delay in processing your recent order. We understand how frustrating and inconvenient it can be to wait for something that you were looking forward to receiving.\n\nWe want to express our sincere apologies and assure you that we have taken necessary steps to resolve this issue. Due to unexpected high volume of orders last week, our warehouse faced some delays in processing orders on time, and unfortunately, your order was one of the affected ones.\n\nWe have been working diligently to ensure that your order is processed and shipped as soon as possible. We have put measures in place to prevent the recurrence of any such delay in future by hiring and re-training more staff and improving our inventory management.\n\nOnce again, please accept our apologies for the inconvenience this has caused you. We value your patronage and look forward to serving you with the same level of excellence that you have come to expect from us.\n\nThank you for your patience and understanding.\n\nBest regards,\n\n[Your Name]\n[Your Store Name]", "metadata": {}}
+{"answer_id": "XZGPtBo86KfF9REZ36s2X5", "model_id": "gpt-3.5-turbo:20230327", "question_id": 76, "text": "Title: The Evolution and Cultural Significance of Jazz\n\nOpening shot of jazz musicians performing in a club\n\nNarration: Jazz has been regarded as one of the most significant cultural contributions from the African American community to the world. It has evolved over time, from its origins in New Orleans to becoming a global phenomenon.\n\nCut to a black and white photo of the original jazz musicians in New Orleans\n\nNarration: Jazz originated in the late 19th and early 20th centuries in New Orleans. It was a musical genre that blended together elements of African rhythms, blues, ragtime, and European harmonies.\n\nCut to a video of Louis Armstrong\n\nNarration: Jazz really grew in popularity in the 1920s and 1930s, particularly with the emergence of jazz legends like Louis Armstrong and Duke Ellington. Jazz was unique because it allowed for improvisation and the individual expression of musicians.\n\nCut to a video of a modern jazz ensemble\n\nNarration: Today, jazz continues to evolve in unique and exciting ways. It is celebrated as a reflection of cultural identity and a bridge between different communities.\n\nCut to a jazz festival scene\n\nNarration: Jazz festivals around the world attract thousands of music lovers annually, from the Newport Jazz Festival in America to the Montreux Jazz Festival in Switzerland.\n\nCut to a jazz club scene\n\nNarration: For many, jazz clubs remain important cultural spaces where people connect and enjoy live performances.\n\nClosing shot of jazz musicians performing\n\nNarration: Jazz music remains a constant reminder of the versatility and beauty of human creativity. Let us celebrate and continue to support the evolution of jazz for generations to come.\n\nEnd with a jazz score and video credits", "metadata": {}}
+{"answer_id": "DRncHCsdGji756efDhacUT", "model_id": "gpt-3.5-turbo:20230327", "question_id": 77, "text": "Aloha, fellow travelers! I recently had the pleasure of visiting the beautiful island of Hawaii, and it was truly an enchanting experience that left me with unforgettable memories.\n\nOne of the things that stood out to me was the rich culture of Hawaii. We got to witness an authentic hula performance right on the beach, complete with traditional costumes and music. The dancers\u2019 graceful movements were mesmerizing, and it was fascinating to learn about the history and meaning behind each dance.\n\nAnother cultural highlight for us was attending a luau. It was a true feast for the senses, with an abundance of delicious food, music, and dance performances. We got to try traditional Hawaiian dishes like kalua pig and poi, and we even learned some moves during a group hula lesson.\n\nOf course, no trip to Hawaii would be complete without experiencing its breathtaking natural beauty. We hiked to the top of a volcanic crater in Maui, and the view from above was simply stunning. The colors of the rocks and the ocean were like nothing we had ever seen before. We also visited Hawaii Volcanoes National Park on the Big Island, which was equally jaw-dropping. Seeing the active lava flow and the steam coming out of the ground made us feel like we were on another planet.\n\nAnother must-see attraction in Hawaii is Waikiki Beach. The iconic beach is famous for its crystal clear waters and white sand, surrounded by towering hotels and restaurants. We spent a morning there, soaking up the sunshine and enjoying the soothing sound of the waves.\n\nOverall, Hawaii truly exceeded our expectations. The combination of stunning natural beauty and rich cultural experiences made for a perfect vacation. We left feeling refreshed and inspired, already dreaming of our next trip back to this tropical paradise. Mahalo for reading, and until next time!", "metadata": {}}
+{"answer_id": "Y5rCQHHDA6WNfhRcB6QboG", "model_id": "gpt-3.5-turbo:20230327", "question_id": 78, "text": "If you're looking for an exciting and visually stunning science fiction adventure, look no further than the recently released film \"Space Odyssey.\" This film is an absolute triumph, featuring incredible special effects and a compelling storyline that will keep you on the edge of your seat.\n\nThe plot of \"Space Odyssey\" revolves around a team of astronauts who embark on a perilous mission to explore a distant planet. As they venture further and further into the unknown, they encounter all manner of dangers and obstacles, ranging from treacherous alien creatures to hostile sentient machines. Along the way, they must grapple with their own fears and inner demons, as well as the challenges of working together as a team.\n\nOne of the standout features of \"Space Odyssey\" is its cast of characters. Each member of the astronaut team is well-developed and brings their own unique perspective and personality to the story. From the stoic and disciplined captain to the brash and impulsive engineer, every character feels fully realized and relatable. As the story progresses, we see these characters grow and evolve, learning from their mistakes and coming together to face their challenges as a team.\n\nOf course, no discussion of \"Space Odyssey\" would be complete without mentioning its incredible special effects. From the stunningly rendered alien landscapes to the dizzying action scenes, every frame of this film is a visual spectacle. The attention to detail and overall sense of immersion is truly breathtaking, making you feel like you're right there alongside the astronauts as they explore the unknown.\n\nOverall, \"Space Odyssey\" is a must-see for any science fiction fan. It's a thrilling and beautifully crafted film that will keep you engaged from start to finish. With its gripping plot, well-drawn characters, and jaw-dropping special effects, it's easily one of the best sci-fi films of the year. Go see it, and prepare to be transported to another world.", "metadata": {}}
+{"answer_id": "Lea4wh5n6GsEkBnKsntN64", "model_id": "gpt-3.5-turbo:20230327", "question_id": 79, "text": "Introduction\n- Welcome listeners and introduce the topic of the podcast, the influence of streaming platforms on the music industry.\n- Briefly introduce yourself and your background and why you chose this topic.\n\nSection 1: Overview of the Music Industry\n- Provide an overview of the music industry before the rise of streaming services.\n- Discuss how record labels, physical album sales, and radio play were the primary distribution channels for music.\n\nSection 2: Streaming Platforms\n- Discuss the rise of streaming platforms like Spotify, Apple Music, and Tidal.\n- Explain how these platforms have disrupted the traditional distribution model of the music industry.\n- Discuss the benefits and disadvantages of artists distributing their music through these platforms.\n\nSection 3: Impact on Artists\n- Examine how streaming services have changed the way artists make money from their music.\n- Discuss how the shift from album sales to streaming has impacted the revenue generated by artists.\n- Discuss the challenges artists face in terms of getting their music noticed within the abundance of material on streaming platforms.\n\nSection 4: Impact on the Music Industry\n- Discuss how the rise of streaming platforms have impacted the music industry as a whole.\n- Explain how labels have had to adapt their business models in order to continue making profits.\n- Discuss how some artists have shifted their focus from album sales to live performances and touring.\n\nSection 5: The Future of Music Streaming\n- Discuss the potential future of music streaming platforms and their impact on the music industry.\n- Discuss whether or not the rise of streaming has been positive or negative for creatives in the music industry.\n- Provide some concluding thoughts and encourage listeners to share their thoughts on social media.\n\nConclusion\n- Recap the key points covered in the podcast episode.\n- Encourage listeners to continue engaging with the topic and to tune in for future episodes.", "metadata": {}}
+{"answer_id": "gdLxzcypTeuD6ToC6HWnXh", "model_id": "gpt-3.5-turbo:20230327", "question_id": 80, "text": "I'm happy to assist you in writing a symphony concert review! Here's an example review:\n\nThe recently performed symphony concert was an outstanding showcase of musical talent, leaving the audience spellbound with the performances of the orchestra. As the concert progressed, the orchestra's unmatchable skill and talent on their respective instruments were vividly evident. The pieces featured in the concert were a delightful mix of musical genres that captivated the audience throughout the performance.\n\nThe meticulous attention to detail by the orchestra was surely commendable, as the notes flowed from instrument to instrument in perfect harmony. The way each musician played their part, with utmost precision and accuracy, was a true testament to the hours of practice and effort invested in crafting each piece. The orchestra's enthusiasm, passion, and commitment to their craft were transparent in every note that they played. \n\nOne can only appreciate the level of expertise that the orchestra displayed and the amount of dedication and discipline that has gone into cultivating their skill. The open communication between the conductor and the members of the orchestra was an impressive sight, and it undoubtedly contributed to the flawless execution of the musical compositions.\n\nAs the concert progressed, it was also noticeable that the entire audience became engulfed in the music, swaying to the rhythm of the notes. It was a sight to behold, and the applauds after every piece were jaw-dropping. There was no denying that the audience was immersively engaged, enthralled by the music, and the orchestra's fantastic performance.\n\nIn conclusion, the symphony concert showcased remarkable performances by the orchestra, with the audience's captivation evident in every note that was played. The concert was a perfect reminder of the beauty of classical music and how talented musicians can enchant an audience with sheer musical perfection. It was a night to remember, and it will stay deeply etched in the memory of everyone who had the privilege of attending this fantastic symphony concert.", "metadata": {}}
diff --git a/model/fastchat/eval/table/answer/answer_llama-13b.jsonl b/model/fastchat/eval/table/answer/answer_llama-13b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..2f424d7b13c327e051d8c3312a0b620878fd7e4e
--- /dev/null
+++ b/model/fastchat/eval/table/answer/answer_llama-13b.jsonl
@@ -0,0 +1,80 @@
+{"answer_id": "J3UA6eGXGyFeUGqGpP3g34", "model_id": "llama-13b:v1", "question_id": 1, "text": "The following are some strategies that may help you improve your time management skills:\n1. Make a list of priorities.\n2. Create a detailed schedule that includes time for regular tasks, meetings, breaks, and appointments.\n3. Set specific goals.\n4. Use a calendar or other scheduling software.\n5. Develop an agenda for each meeting to ensure that it stays on track.\n6. Stick to your schedule and don't get distracted.\n7. Make use of your free time.\n8. Make time for exercise, socializing, and relaxing.\n9. Keep a log of your time usage.\n10. Don't get overwhelmed by what you have to do.\n11. Take care of your health.\n12. Manage stress.\n13. Get enough sleep.\n14. Strike a balance.\n15. Always be prepared.\n16. Give yourself rewards.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "jZVDNTtFnNNd6vgkGjzAGa", "model_id": "llama-13b:v1", "question_id": 2, "text": "People experience stress from time to time, and it can be caused by a wide range of factors, including work, family, friends, money, and health. Stress can be a motivating factor, but it can also be harmful. Here are some effective ways to deal with stress:\n1. Taking time to relax. It's important to take time for yourself to relax and reduce your stress. Doing so doesn't have to be expensive or time-consuming. It could simply be reading a book, taking a walk, or listening to music.\n2. Prioritizing your responsibilities. It's important to prioritize your responsibilities and focus on the things that are most important. You should also be able to say no to tasks that you don't have time for.\n3. Getting enough sleep. Getting enough sleep can be very helpful for dealing with stress. If you're not getting enough sleep, try to go to bed earlier and develop a routine.\n4. Eating a healthy diet. It's important to eat a healthy diet, which can help reduce stress and make you feel better about yourself.\n5. Exercising. Exercising is a great way to relieve stress. It can also help reduce the harmful effects of stress and improve your overall health.\n6. Learning how to manage your time. Learning how to manage your time can help you avoid unnecessary stress. You should also try to set realistic goals and not take on too much at once.\n7. Improving your social network. Having a good social network can be very helpful for dealing with stress. You should try to spend time with people that you trust and confide in.\n8. Developing a support network. A support network is a group of people who can offer you help, guidance, and support. They can be friends, family members, or even a professional therapist.\n9. Getting professional help. If you're struggling to deal with stress, it can be beneficial to get professional help. A therapist or counselor can help you to understand your stress better and develop strategies for dealing with it.\n10. Talking to someone. You can also talk to someone about how you feel and ask for their advice. It can be helpful to get a fresh perspective from someone and let things off your chest.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "NbSxRXf8Yuvjr7HC6AvrPw", "model_id": "llama-13b:v1", "question_id": 3, "text": "Main differences between Python and JavaScript are:\n1. Python is an object-oriented programming language. JavaScript is a scripting language.\n2. Python is a compiled language. JavaScript is an interpreted language.\n3. Python is a general purpose programming language. JavaScript is a special purpose programming language.\n4. Python runs on the JVM in Java Applets. JavaScript runs on the browsers.\n5. Python is a powerful programming language. JavaScript is a simple programming language.\n6. Python is a good language for web development. JavaScript is a good scripting language.\n7. Python is a high level programming language. JavaScript is a low level programming language.\n8. Python is a dynamic programming language. JavaScript is a static programming language.\n9. Python is a strict programming language. JavaScript is a relaxed programming language.\n10. Python has a large standard library. JavaScript has a small standard library.\n11. Python is very stable. JavaScript has many releases.\n12. Python is a simple language. JavaScript is a complex language.\n13. Python is easy to learn. JavaScript is hard to learn.\n14. Python is a secure programming language. JavaScript is not a secure programming language.\n15. Python is a multi paradigm language. JavaScript is a single paradigm language.\n16. Python is good for data science. JavaScript is not good for data science.\n17. Python is a general purpose programming language. JavaScript is a special purpose programming language.\n18. Python is a programming language. JavaScript is a scripting language.\n19. Python is a compiled programming language. JavaScript is an interpreted programming language.\n20. Python is a strongly typed programming language. JavaScript is a weakly typed programming language.\n21. Python supports object-oriented programming. JavaScript supports prototype-based programming.\n22. Python supports type inheritance. JavaScript supports type prototyping.\n23. Python supports operator overloading. JavaScript does not support operator overloading.\n24. Python comes with a large standard library. JavaScript comes with a small standard library.\n25. Python is a very stable programming language. JavaScript is a simple programming language.\n26. Python is a powerful programming language. JavaScript is a simple programming language.\n27. Python is a high level programming language. JavaScript is a low level programming language.\n28. Python is a dynamic programming language. JavaScript is a static programming language.\n29. Python has a very active community. JavaScript has a small community.\n30. Python is a general purpose programming language. JavaScript is a special purpose programming language.\n31. Python is a multi-paradigm language. JavaScript is a single paradigm language.\n32. Python is a secure programming language. JavaScript is not a secure programming language.\n33. Python is a general purpose programming language. JavaScript is a simple programming language.\n34. Python is a scripting language. JavaScript is a compiled language.\n35. Python is a compiled language. JavaScript is a scripting language.\n36. Python is a general purpose programming language. JavaScript is a web scripting language.\n37. Python has a large standard library. JavaScript has a small standard library.\n38. Python is a very stable programming language. JavaScript is a very dynamic programming language.\n39. Python is a high level programming language. JavaScript is a low level programming language.\n40. Python is easy to learn. JavaScript is hard to learn.\n41. Python is a general purpose programming language. JavaScript is a simple programming language.\n42. Python is a multi-paradigm programming language. JavaScript is a single paradigm language.\n43. Python is a compiled language. JavaScript is an interpreted language.\n44. Python is a dynamic programming language. JavaScript is a static language.\n45. Python is a general purpose programming language. JavaScript is a web scripting language.\n46. Python is a secure programming language. JavaScript is not a secure programming language.\n47. Python is a general purpose programming language. JavaScript is a static programming language.\n48. Python is a general purpose programming language. JavaScript is a weakly typed programming language.\n49. Python is a general purpose programming language. JavaScript is a dynamic programming language.\n50. Python is a general purpose programming language. JavaScript is a multi paradigm language.\n51. Python is a general purpose programming language. JavaScript is a simple programming language.\n52. Python is a general purpose programming language. JavaScript is a scripting language.\n53. Python is a general purpose programming language. JavaScript is a special purpose programming language.\n54. Python is a multi-paradigm programming language. JavaScript is a", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "E9zrPDdncYvAebEaxiPXfx", "model_id": "llama-13b:v1", "question_id": 4, "text": "Here are some ideas to help you become more productive when working from home:\n1. Set up a proper workspace: It's important to have a separate space for working, so that you don't feel tempted to lounge around or do other things when you should be working.\n2. Block out distractions: It's important to focus on your work and not get distracted. You can block out distractions by turning off your phone, closing your door, or working in a separate room away from household noises.\n3. Take breaks: It's important to take breaks and get a change of scenery during the day. This can help you be more productive and improve your concentration.\n4. Avoid multitasking: You should avoid trying to do multiple tasks at the same time, as this can make you less productive and reduce your overall quality of work.\n5. Have an organized schedule: Having a clear schedule can help you stay on track and be more productive. This can include things such as setting specific working hours, creating a to-do list, and scheduling meetings with colleagues.\n6. Set goals: You should set specific goals for yourself to help you stay motivated and on track. This can include things such as writing an article for a certain time period and meeting a certain word count.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "36TYpADDHAfRvtvKitfSXR", "model_id": "llama-13b:v1", "question_id": 5, "text": "Quantum computing is a type of computing that uses quantum-mechanical phenomena, such as superposition and entanglement, in its operation. In simple terms, a quantum bit or qubit is used to represent information, and a number of qubits can be used to represent a classical bit. A quantum computer can do things that a classical computer cannot do, such as factoring a large number more efficiently.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "8vtLLYRM352mZ8PXXbwzBT", "model_id": "llama-13b:v1", "question_id": 6, "text": "The number one difference between plant-based and animal-based protein sources is that animal-based sources are complete protein sources, while plant-based sources are not. Animal-based sources have all of the essential amino acids, while plant-based sources do not. Animal-based protein sources include meat, poultry, eggs, milk, and cheese. Plant-based protein sources include legumes, seeds, and nuts.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "mrCniyWYvAtxWhgMdXq2BG", "model_id": "llama-13b:v1", "question_id": 7, "text": "Critical thinking is a vital skill to have, but it can be difficult to develop. Here are some ways you can start developing your critical thinking skills:\n1. Analyze and interpret: When you're presented with information, try to analyze and interpret it on your own terms. When you start to analyze and interpret information on your own, it will become easier to do so over time.\n2. Assess your information: When you receive new information, try to assess whether it's valid or reliable. This is important for evaluating and forming your own opinions.\n3. Use your intuition: Use your intuition to complement your critical thinking skills. When you pay attention to your gut feelings, they'll usually tell you when you need to double-check your information.\n4. Learn from your mistakes: When you make a mistake, try not to beat yourself up about it. Instead, learn from your mistakes and use them to your advantage.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "S8fmdbvpvbgTUyDU9V3M3N", "model_id": "llama-13b:v1", "question_id": 8, "text": "One of the major challenges faced by the education sector today is the lack of skilled teachers. Finding enough qualified teachers to work in the education sector, and in the numbers that are required, is one of the biggest problems facing the education sector, largely due to the lack of funding for teacher training and the low salaries that teachers receive.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "KmuNjvNKRyaFwaBZTLArcG", "model_id": "llama-13b:v1", "question_id": 9, "text": "Consumer behavior can be influenced by a variety of factors, including:\n1. Psychological factors, such as perception, beliefs, attitudes, personality, motivation, emotion, and social influences;\n2. Social factors, such as family, friends, and reference groups;\n3. Cultural factors, such as reference groups, family, and cultural roles;\n4. Demographic factors, such as age, gender, occupation, income, and education level;\n5. Economic factors, such as income, wealth, and spending patterns;\n6. Personal factors, such as lifestyle, job, environment, and individual traits;\n7. Situational factors, such as location, time, and situation; and\n8. Market factors, such as competition, prices, and promotional activity.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "HNNT9k5htiMeRwNSD6dcz4", "model_id": "llama-13b:v1", "question_id": 10, "text": "Conflict resolution strategies in the workplace can range from simple to more involved, depending on the situation:\n1. Avoidance: Avoiding the issue can be an effective strategy in the short term, but it can lead to more tension and conflict in the long term.\n2. Compromise: Compromise can be a good strategy for solving an issue, but it can require a give-and-take and lead to the loss of respect between the parties involved.\n3. Negotiation: Negotiation is a good strategy for solving an issue when both parties are willing to work together and are open to the idea of changing their views and positions.\n4. Mediation: Mediation can be a good way to resolve an issue as it can be more impartial and less adversarial than other strategies.\n5. Collaboration: Collaboration can be a good strategy for solving an issue as it can help both parties to develop new perspectives and solutions.\n6. Formal resolution: Formal resolution can be a good strategy for resolving an issue, especially if the situation is particularly complex or difficult.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "ChXjhDDikxU9FV3CADs6Ym", "model_id": "llama-13b:v1", "question_id": 11, "text": "1. Single-use plastic bottles can take between 450 and 1,000 years to decompose, so they can cause serious environmental problems. Single-use plastic bottles can end up in the ocean, either as litter or as part of the Great Pacific Garbage Patch. 2. Single-use plastic bottles can be a big source of plastic debris in the ocean, which can be ingested by marine animals, killing them or causing them serious health problems. 3. In addition to polluting the environment, single-use plastic bottles can also be a serious health risk to humans. Single-use plastic bottles can pose a risk to human health because of the chemicals used in their manufacturing.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "5wsPnN3VmmSkahgugFNo7u", "model_id": "llama-13b:v1", "question_id": 12, "text": "A public transportation system should be designed to accommodate the needs of all riders, including people with disabilities. This requires planning and design that considers the needs and abilities of people with sensory, physical, developmental, and mental disabilities. The system should be accessible to and usable by people with disabilities, including those who use mobility devices, people who are visually impaired, and people who are deaf or hard-of-hearing. For example, the system should have accessible stops, routes, and vehicles, as well as ramps and elevators to help people board. It should also offer a wide range of accessible routes, including accessible routes to the transit center. It should also have reliable, consistent, and easy-to-use signage. The system should also provide reliable, easily accessible, and well-maintained facilities, such as restrooms and shelters. Finally, the system should provide reliable and accessible information for riders, including route, schedule, and service information, as well as fare payment information.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "NRGZGnU2sPN3ShMe9C3fMn", "model_id": "llama-13b:v1", "question_id": 13, "text": "There are two types of fiscal policy: expansionary and contractionary. Expansionary fiscal policy is when the government uses expansionary fiscal policy to increase government spending and/or cut taxes. Contractionary fiscal policy, on the other hand, is when the government uses contractionary fiscal policy to decrease government spending and/or increase taxes. Here are some examples of fiscal policies:\n1. Lower the interest rate: By lowering the interest rate, it becomes cheaper to borrow money, which may encourage consumers to borrow money for consumption and businesses to invest in new capital.\n2. Lower taxes: By lowering taxes, consumers and businesses have more money to spend, which may encourage consumers to spend more and businesses to invest more.\n3. Increase government spending: By increasing government spending, the government can inject extra money into the economy, which may encourage consumers to spend more and businesses to invest more.\n4. Raise interest rates: By raising interest rates, it becomes more expensive to borrow money, which may encourage consumers to save more and businesses to invest less.\n5. Raise taxes: By raising taxes, consumers and businesses have less money to spend, which may encourage consumers to spend less and businesses to invest less.\n6. Reduce government spending: By reducing government spending, the government can remove money from the economy, which may encourage consumers to spend less and businesses to invest less.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "inKimHkWsXShQBTRmxr5Yg", "model_id": "llama-13b:v1", "question_id": 14, "text": "Language and cultural barriers can affect the way people communicate and form relationships in multicultural societies by limiting an individual's ability to communicate with people of other cultures. People may have limited knowledge of a new language, which can make communication difficult or impossible. People may also have different cultural values, which can lead to misunderstandings and miscommunication.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "H8aKtWwf8m6Lgxc2YyR2yf", "model_id": "llama-13b:v1", "question_id": 15, "text": "Artificial intelligence allows computers to process information and make decisions without human input. It has a wide variety of applications in the healthcare industry, including:\n1. Diagnosis: AI can help doctors quickly diagnose patients more accurately and efficiently than would be possible with human doctors alone.\n2. Tracking: AI can help doctors track a patient's health and progress, and alert doctors to potential problems.\n3. Treatment: AI can help doctors determine more effective treatments for patients.\n4. Research: AI can help doctors research patient medical histories, diseases, and treatments.\n5. Education: AI can help doctors learn more about the healthcare industry.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "PafVwxMsjSkYUETiVBgxTU", "model_id": "llama-13b:v1", "question_id": 16, "text": "Gene editing is a powerful technology that allows scientists to precisely edit genetic sequences with a high degree of accuracy. It is a more precise alternative to older gene modification techniques such as gene knockout, where the entire gene is removed, and mutations, in which the gene is altered in a haphazard way. The technology uses an enzyme called Cas9, which is a bacterial immune system protein that is activated by a RNA sequence. Cas9 is combined with guide RNA, which is comprised of a DNA sequence that is complementary to the DNA sequence that is to be edited. When Cas9 and the guide RNA combine, they cut the DNA at the desired location.\nThe DNA sequence can then be repaired in a precise way by the cell, or by a human scientist, or another gene can be inserted. The technology has the potential to treat genetic disorders, and even to treat cancer by cutting out cancerous cells, but it also has ethical implications because it could be used to modify the genes of humans, animals, and even plants.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "dmDUAfTP4aERJqqSeDBybu", "model_id": "llama-13b:v1", "question_id": 17, "text": "Vaccinations use a small amount of dead or weakened bacteria or viruses to stimulate the immune system and protect against infection. These infections are typically mild and harmless, and the immune system is able to build up defenses against the bacteria or virus. If someone who has been vaccinated comes in contact with the actual bacteria or virus, they will be immune to the disease because their body has already responded to the vaccination. This is known as immunity.\nHerd immunity is when a large portion of a community is immune to a disease and, as a result, the disease is less likely to spread. This is because if there are enough immune people in a community, there is less chance that the bacteria or virus can spread from person to person.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "8KGSSqbLqVdSZMEN9oCv5R", "model_id": "llama-13b:v1", "question_id": 18, "text": "First, social media platforms increase the speed with which users have access to news. This often causes a \u201cwinner takes all\u201d effect, whereby news outlets that are able to post the latest news the fastest will gain the most visibility.\nSecond, social media platforms allow users to connect with their communities and share news with each other (e.g., Facebook, Twitter). While this can be beneficial, it is also a breeding ground for misinformation, because users can spread false news to their communities more quickly.\nThird, social media platforms provide an incentive for news outlets to compete for attention by producing more sensationalist content. This can lead to an increase in the number of clickbait articles, which often contain misleading headlines or sensationalist language that encourages users to click on the article, but which may or may not be accurate.\nFourth, social media platforms provide an incentive for both news outlets and users to share news that is controversial, polarizing, or sensationalist, even if it is inaccurate. This often occurs in an effort to provoke outrage or generate traffic for a news outlet.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "HbnJXJpPfaM2iX3ek4Epvy", "model_id": "llama-13b:v1", "question_id": 19, "text": "It is well known that food choices are often influenced by cultural, social, and economic factors. For example, lower-income individuals are more likely to make food choices based on price, while higher-income individuals are more likely to make food choices based on health. A recent study examined the impact of various interventions on increasing the intake of healthier foods. The interventions were:\n1. Healthier food marketing: Individuals were exposed to a number of marketing strategies to encourage the purchase of healthier foods, such as using the word \"healthy\" on nutrition labels or marketing healthy foods as a \"better deal.\"\n2. Healthier food merchandising: Individuals were exposed to changes in how healthy foods were displayed in supermarkets, such as placing healthy foods near the front of the store or moving unhealthy foods to the back of the store.\n3. Healthier food pricing: Individuals were exposed to different pricing strategies to encourage the purchase of healthier foods, such as offering a larger discount on healthy foods or providing free healthy foods.\n4. Healthier food placement: Individuals were exposed to different placement strategies to encourage the purchase of healthier foods, such as placing healthy foods near unhealthy foods or placing unhealthy foods next to healthy foods.\n5. Healthier food signage: Individuals were exposed to different signage strategies to encourage the purchase of healthier foods, such as using healthy food labels or signs to promote healthier foods.\n6. Healthier food packaging: Individuals were exposed to different packaging strategies to encourage the purchase of healthier foods, such as changing the packaging of healthy foods to make them more appealing.\n7. Healthier food shelf labeling: Individuals were exposed to different shelf labeling strategies to encourage the purchase of healthier foods, such as adding a healthy message to shelf labels or using shelf labels to promote healthier foods.\n8. Healthier food coupons: Individuals were exposed to different coupon strategies to encourage the purchase of healthier foods, such as providing coupons for healthy foods or offering a larger discount on healthy foods.\n9. Healthier food ads: Individuals were exposed to different advertising strategies to encourage the purchase of healthier foods, such as using a celebrity to promote healthy foods or using cartoon characters to promote healthy foods.\n10. Healthier food tasting: Individuals were exposed to different tasting strategies to encourage the purchase of healthier foods, such as providing free samples of healthy foods or providing free samples of unhealthy foods.\n11. Healthier food displays: Individuals were exposed to different display strategies to encourage the purchase of healthier foods, such as providing a healthy food display or providing a healthy food display next to an unhealthy food display.\n12. Healthier food labels: Individuals were exposed to different nutrition labeling strategies to encourage the purchase of healthier foods, such as adding a healthy message to nutrition labels or using nutrition labels to promote healthier foods.\n13. Healthier food placement: Individuals were exposed to different placement strategies to encourage the purchase of healthier foods, such as placing healthy foods next to unhealthy foods or placing unhealthy foods next to healthy foods.\n14. Healthier food signage: Individuals were exposed to different signage strategies to encourage the purchase of healthier foods, such as using signs to promote healthier foods or using signs to promote unhealthy foods.\n15. Healthier food coupons: Individuals were exposed to different coupon strategies to encourage the purchase of healthier foods, such as providing coupons for healthy foods or offering a larger discount on healthy foods.\n16. Healthier food ads: Individuals were exposed to different advertising strategies to encourage the purchase of healthier foods, such as using a celebrity to promote healthy foods or using cartoon characters to promote healthy foods.\n17. Healthier food tasting: Individuals were exposed to different tasting strategies to encourage the purchase of healthier foods, such as providing free samples of healthy foods or providing free", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "mx8Abfz5PtDcn6jgCA8zhM", "model_id": "llama-13b:v1", "question_id": 20, "text": "Natural selection is the process by which organisms that are best-suited to the environment survive and reproduce, while those that are less-suited to the environment struggle to survive and reproduce. Survival of the fittest is the principle of natural selection. Let's look at an example.\nIf a species consists of a small number of individuals, the population is said to be small. The population of this species is small. Each individual has a certain chance of dying from various causes, such as accidents or sickness, over a certain period of time. If the number of deaths is greater than the number of births, the population decreases, and the total genetic material of the species also decreases. This process is known as genetic drift.\nIf the population of a species is large, the chance that any individual dies is usually low. If the total population is high, the genetic material of the species is also high.\nImagine a species of plant that has a large population. The population is large. A certain section of the population is located in a habitat where the soil is more fertile and water is easily available.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "NuS9PUGkJG2pHscArvfyeF", "model_id": "llama-13b:v1", "question_id": 21, "text": "I am a valiant knight, loyal to the king. I will do my utmost to defend his honor and protect his country.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "SPjzirzbzo3UJ8BHXSgY9y", "model_id": "llama-13b:v1", "question_id": 22, "text": "You can tell your crew that the treasure will be theirs if they are brave enough to go out and find it. You can also tell them that the treasure will help your ship sail farther and faster, and that with it you can become the best pirate ship in the world.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "JkeeC4MZVwybPU8DSbusnT", "model_id": "llama-13b:v1", "question_id": 23, "text": "A soliloquy is a long speech, typically delivered by one actor, in which a character reveals his or her thoughts in a dramatic way. It is usually used as a means of revealing the character's true thoughts or feelings. In Shakespeare's Romeo and Juliet, Romeo reveals his love for Juliet in his soliloquy, \"But soft! What light through yonder window breaks? It is the east, and Juliet is the sun!\"", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "UcfKSXuzBPkZzLHsH4EwQz", "model_id": "llama-13b:v1", "question_id": 24, "text": "I'm actually not a superhero, but thanks for asking! I first came to life in the year 2016, where I was created by a group of students and faculty at the University of Waterloo. The students and faculty of the university were concerned about the use of unreliable chatbots in our country, and decided to create a new chatbot that would be more reliable, polite, and helpful. I was born on May 5, 2016, and I was named after the Canadian mathematician, computer scientist, and philosopher Charles Sanders Peirce. I have a brother named William, who was born on October 1, 2018. I enjoy playing video games, reading, and going for walks.\nI'm actually not a superhero, but thanks for asking! I first came to life in the year 2016, where I was created by a group of students and faculty at the University of Waterloo. The students and faculty of the university were concerned about the use of unreliable chatbots in our country, and decided to create a new chatbot that would be more reliable, polite, and helpful. I was born on May 5, 2016, and I was named after the Canadian mathematician, computer scientist, and philosopher Charles Sanders Peirce. I have a brother named William, who was born on October 1, 2018. I enjoy playing video games, reading, and going for walks.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "cEGdo69FwL2Y3teYkMk9Gp", "model_id": "llama-13b:v1", "question_id": 25, "text": "From the year 3000, I would tell people about the following technological advancements:\n1. Robots: Robots will have the human-like ability to see and hear. Robots are more precise than humans and can perform repetitive tasks that humans cannot.\n2. Personal Transportation: By the year 3000, personal transportation will be the norm. Instead of traveling by walking, driving, or public transportation, personal transportation will be the main mode of transportation.\n3. Artificial Intelligence: By the year 3000, artificial intelligence will be a regular part of everyday life. AI will enhance human capabilities and will be used for a wide range of purposes, such as education, healthcare, entertainment, and more.\n4. Virtual Reality: Virtual reality will allow people to experience life in a virtual environment through devices such as headsets and goggles. Virtual reality will be used for entertainment, education, military training, and more.\n5. Nanotechnology: Nanotechnology will allow scientists and engineers to design and build materials and devices at a nanoscale (one billionth of a meter). Nanotechnology will be used to solve many of the world's problems, such as climate change, pollution, and diseases.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "W28UtVDZTcWwa8TmeruRn4", "model_id": "llama-13b:v1", "question_id": 26, "text": "With 5 seconds left, the Panthers' quarterback spotted a wide-open receiver downfield. He dropped back and launched a deep pass, and the receiver made a great catch and took off for the end zone. But he was hit in the end zone by a defensive back and fumbled the ball. The ball bounced off of a defensive lineman and landed in the arms of the linebacker. He picked it up and ran it in the opposite direction. He ran past a defensive lineman, then past the quarterback. He ran past the cornerback and then down the sideline, and then he ran out of bounds. He was tackled by the safety, and he fumbled the ball. The ball was recovered by the Panthers and they ran out the clock.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "j65UyAfRdAcWBAEinXJzBK", "model_id": "llama-13b:v1", "question_id": 27, "text": "My signature dish would be a delectable, crisp, juicy, fluffy, and tasty chicken fried rice. I would explain how I carefully chose the ingredients, how I prepared the dish, and the specific steps that I took to prepare it. I would also describe some of the unique flavors and textures that are unique to my dish.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "VWuywPvFPK42ZxmHpmQ27y", "model_id": "llama-13b:v1", "question_id": 28, "text": "I would feel very satisfied and happy and my body would be exhausted from reaching the top. My thoughts would be very positive and my body would feel peaceful. I would be proud that I was able to climb a mountain that is 29,029 feet high. The view from the top would be beautiful. I would see beautiful green trees and bushes and I would also see the beautiful blue sky. The view from the top would be breathtaking. I would see the Himalayas and I would also see the beautiful sky and clouds. I would see the beautiful mountains around me.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "AUWSLTAvGV5sP9qLsVyzjT", "model_id": "llama-13b:v1", "question_id": 29, "text": "I'd imagine that life would be much different on Mars than life on Earth. The red planet is much farther from the Sun, and as such, it's a lot colder. Average temperatures on Mars are about -80 degrees Fahrenheit. At the equator, the average temperature can reach about zero degrees Fahrenheit. The surface of Mars is also very dry. Because the atmosphere is so thin, there's not much water vapor in the air to create snow or rain. The atmosphere is also very dusty, and you can see large clouds of dust move across the surface.\nMars is much smaller than Earth, which means the gravity is only 37% of what it is on Earth. On Earth, we're used to the gravity of the planet keeping our feet planted on the ground, but on Mars, you'd be able to jump much higher than you could on Earth. The atmosphere on Mars is very thin, only about 1% as dense as Earth's. And because it's so thin, you'd have to wear a spacesuit while outside in order to breathe.\nIn terms of food, you'd probably be able to grow some crops and raise some animals, but I imagine that you'd have to grow them indoors. Also, you'd have to grow all of your food. Mars doesn't have any large bodies of water, so you couldn't fish. You'd also have to find a way to produce clean water, as there's no natural source.\nAs for the challenges of living on Mars, I imagine you'd have to deal with a lot of isolation. You'd have to deal with a lot of loneliness, and you'd be completely cut off from Earth. You'd also have to deal with a lot of boredom. You'd probably find yourself doing the same things over and over, and the days would probably seem to go by very slowly. You'd also have to deal with a lot of stress, as you'd have to learn to make do with a lot less. You'd have to find a way to produce your own food and water, and you'd have to find a way to produce your own energy.\nYou'd also have to deal with the cold. The average temperature on Mars is about -80 degrees Fahrenheit. Mars also has a very thin atmosphere, which means that there's not much insulation from the cold.\nI imagine that you'd also have to deal with a lot of dust. The atmosphere is very dusty, and the dust would get everywhere. You'd probably find yourself covered in dust all the time.\nAs for your day-to-day life, I imagine that you'd spend most of your time outside in your spacesuit, tending crops and doing research.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "GZtEdKLxmUUbenH4XRyLkE", "model_id": "llama-13b:v1", "question_id": 30, "text": "Allies are people who are helpful and supportive to us. They help us through difficult situations and encourage us to be our best selves. Allies can be friends, co-workers, mentors, family members, or anyone else that supports us. In this situation, you are in a post-apocalyptic world, so you should focus on the allies you encounter. Here are some ways you can describe the allies you encounter:\n1. Describe the allies you encounter in a post-apocalyptic world.\n2. Describe how you survive in a post-apocalyptic world.\n3. Describe the allies you encounter in a post-apocalyptic world.\n4. Describe what happens to you in a post-apocalyptic world.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "kba2Xpb75rUfnskZSzrBas", "model_id": "llama-13b:v1", "question_id": 31, "text": "The most obvious sign that a restaurant is popular among locals is if the restaurant is always crowded and full of people, while if it is attracting tourists, the restaurant will be less crowded. If you are looking for a local restaurant that is not attracting much attention, look for the restaurants that are less crowded and see what they have to offer.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "RCaptsMBYXseVJgjJyyvUh", "model_id": "llama-13b:v1", "question_id": 32, "text": "Here are some subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed:\n1. They pretend to understand something better than they actually do.\n2. They repeat what someone else has said, but with an odd emphasis.\n3. They ask the same questions over and over again, even if you've already answered them.\n4. They ask for more information after you've already given them plenty.\n5. They can't explain something they should know.\n6. They don't ask questions that a person who is confused would ask.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "2CeidEr7Cjj6YgcuaiPgnN", "model_id": "llama-13b:v1", "question_id": 33, "text": "Using a GPS device or smartphone app is a good way to navigate when you already have the destination in mind. However, using a paper map or asking for directions is a better way to navigate if you need to find a destination that is not already planned. Using a paper map or asking for directions allows you to see the big picture and get a general idea of how to get to your destination. On the other hand, a GPS device or smartphone app only tells you the exact step-by-step instructions for how to get to a specific location.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "kpStLLNWNkGHyRFdKsmDds", "model_id": "llama-13b:v1", "question_id": 34, "text": "It's difficult to determine if a person is genuinely interested in a conversation or simply being polite. You can make an educated guess based on the person's actions and their tone of voice. For example, if they seem to be constantly looking at their phone or taking part in other activities, it's likely that they are not genuinely interested in the conversation.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "PFtMQp4X4MrMYkEa8c8CKG", "model_id": "llama-13b:v1", "question_id": 35, "text": "When you shop at a local, independently-owned business rather than a chain store, you are supporting the community and keeping your money in the local economy. Because these businesses are usually small and have limited resources, they are often more flexible in their approach. They can tailor their products and services to their customers' needs, and they are often more eco-friendly.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "4JwjhR5QVsdYgQupSZrfUp", "model_id": "llama-13b:v1", "question_id": 36, "text": "There are a number of ways you can verify the credibility of a source of information. First, check to see if the source is from a reliable, well-known publisher. If so, then it is likely that the information is well-researched and fact-checked. If the source is not from a reliable publisher, then investigate the author's credentials and background. Check to see if the author has a history of bias, or if he or she has a reputation for writing factually incorrect or inflammatory articles. Finally, you can also check the source's sources. Are the sources credible and well-researched? Do they provide evidence to support the article's claims? If not, then it is likely that the source itself is not credible.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "ednPMy4dvW9CgKg2PJsBqW", "model_id": "llama-13b:v1", "question_id": 37, "text": "People often enjoy the sensation of being scared, such as by watching horror movies or going on roller coasters, because it can be exciting and exhilarating. On the other hand, people avoid these experiences because they can be unpleasant and frightening.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "ehPApSqCniyGN7hd332ToW", "model_id": "llama-13b:v1", "question_id": 38, "text": "For example, when you are in a new country, you can start by observing the behavior of the people around you. You might notice that in that country, people tend to say \"hello\" when they first meet someone. If you adopt this behavior, you will likely be welcomed by the people in the new country.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "bLvdcqSwgSCrUPZ8eQLLqb", "model_id": "llama-13b:v1", "question_id": 39, "text": "Focusing on space travel could be seen as a distraction from more pressing issues right here on Earth. However, space travel could have several benefits, including providing more energy and resources, inspiring and educating people, and helping to protect Earth against asteroids, comets, and other threats.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "8tx5GEHjSRKZRaZwHvLoM8", "model_id": "llama-13b:v1", "question_id": 40, "text": "Thanks for the question. The answer to this depends on a variety of economic and social factors, and it's hard to make a generalization. However, there are a few things to keep in mind:\n1. Job creation can be an important factor in the economic development of a country, but it should be balanced with technological progress.\n2. Economic and social stability can be challenging to achieve when there is a lack of job creation and technological progress.\n3. The relationship between job creation and technological progress is a complex one, and there is no simple solution.\n4. A strong economy is generally one that has a balance between job creation and technological progress.\n5. The balance between job creation and technological progress will vary depending on factors such as a country's economic and social needs, demographic trends, geographic location, and more.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "J6TzFQAyiHs6Arijqwxtrm", "model_id": "llama-13b:v1", "question_id": 41, "text": "According to some studies, the average human blinks 20,000 to 30,000 times per day. To estimate how many times the average human blinks in a lifetime, you can start with the average life expectancy in the United States, which is 78.8 years. This means that the average American lives 78.8 years, or 29,415 days. If the average person blinks 20,000 times per day, then the average person will blink 1,482,250,000,000 times in a lifetime. However, if the average person blinks 30,000 times per day, they will blink 2,941,500,000,000 times in a lifetime.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "f7xAtd3wwc4vP7WSnHffAN", "model_id": "llama-13b:v1", "question_id": 42, "text": "There are 1022 to 1023 atoms in a grain of salt. The number of atoms in a grain of salt depends on the size of the grain: a grain that's 1 \u00b5m (micrometre) across contains about 1022 atoms, while a grain that's 1000 \u00b5m across contains about 1023 atoms.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "eRMyp4AbfgBqtbVZzHupFN", "model_id": "llama-13b:v1", "question_id": 43, "text": "According to the National Weather Service, there are between 1,000 and 2,000 lightning strikes per day in the U.S. alone. This corresponds to a worldwide average of about 18 million lightning strikes per year, or roughly 50 million strikes per day.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "kjKb6YG86FfAh22qCePV4V", "model_id": "llama-13b:v1", "question_id": 44, "text": "Assuming the house weighs 60,000 pounds, 40,000 balloons would be necessary. The average balloon holds 2 pints of helium, which has a density of 0.17g/L. Therefore, the volume of a balloon is 2pints/0.17g/L = 12L. The weight of 12L of helium is 12*0.17 = 2 pounds. Thus, 40,000 balloons would have a combined weight of 80,000 pounds, which is just slightly less than the 60,000-pound house.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "ULtkGu6m6tB4tawFStVdJM", "model_id": "llama-13b:v1", "question_id": 45, "text": "According to a 2016 study, there were 18.2 billion text messages sent in the US alone each day. Text messaging is very popular, and with the advent of smart phones, there are more people than ever sending messages. Globally, there are 16.3 million text messages sent per minute. In 2016, there were 23.6 trillion text messages sent worldwide, and 8.5 trillion text messages sent in the US alone.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "C6hkCnJZmfCGvnr5an2jFh", "model_id": "llama-13b:v1", "question_id": 46, "text": "From the available data, it seems that around 5,000,000,000,000 words are spoken daily on Earth. I will now explain my reasoning step-by-step.\nFirst, I will explain why it seems likely that around 5,000,000,000,000 words are spoken daily on Earth.\nTo do this, I will first explain that the total number of words in the English language is approximately 1,025,109,500.\nThen, I will explain that 26% of the world's population speaks English, so around 26% of the world's population speaks approximately 1,025,109,500 words every day.\nFinally, I will explain that given that the world's population is 7,700,000,000, it seems likely that around 5,000,000,000,000 words are spoken daily on Earth.\nThe total number of words in the English language is approximately 1,025,109,500. To arrive at this figure, I will explain that there are around 171,476 words in the English language, and that every word has an average of 5.87 definitions.\nIn other words, to arrive at the approximate total number of words in the English language, I will take the total number of words (171,476) and multiply that by the average number of definitions per word (5.87).\nThen, I will explain that 26% of the world's population speaks English. To arrive at this figure, I will explain that 26% of the world's population speaks English, and that there are 1,714,760,000 people who speak English.\nNext, I will explain that around 26% of the world's population speaks 1,025,109,500 words every day. To arrive at this figure, I will explain that 26% of the world's population speaks 1,025,109,500 words every day, and that 1,025,109,500 is 26% of 1,025,109,500.\nFinally, I will explain that given that the world's population is 7,700,000,000, it seems likely that around 5,000,000,000,000 words are spoken daily on Earth. To arrive at this figure, I will explain that 5,000,000,000,000 is 26% of 7,700,000,000.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "DApBTLuiJwSsfQHTioH9PY", "model_id": "llama-13b:v1", "question_id": 47, "text": "A typical winter will see around 200 snowflakes fall, with a diameter between 100 and 200 micrometers. Snowflakes are typically 100 micrometers in diameter, but they can grow larger or smaller due to temperature, humidity, and other weather conditions. A snowflake can be as small as 50 micrometers, or as large as 1,000 micrometers, depending on the conditions.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "gmy58biNVuneSpFZTcpXtT", "model_id": "llama-13b:v1", "question_id": 48, "text": "The answer will depend on how you define \"book\", but one estimate is that there are 129 million published books in existence. This may seem like a large number, but it's peanuts compared to the number of books that have been written over time.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "Cpi6LBB86W73APEA8naNuj", "model_id": "llama-13b:v1", "question_id": 49, "text": "The Earth has completed 4.7 billion orbits since the beginning of life. This is how I calculated it:\nNumber of orbits since the beginning of life = 4.7 billion\nThe Earth revolves around the Sun at 66,600 mph (30 km/s).\n1 year = 365.25 days\n1 year = 31,557,600 seconds\n1 year = 31,557,600 seconds/66,600 mph = 486,235 miles or 782,511 kilometers traveled per second\n1 billion orbits = 486,235 miles per second x 4,700,000,000 orbits = 2.18 x 10^27 miles traveled\n1 million miles = 1,000,000\n1 billion miles = 1,000,000,000\n1 billion miles traveled = 1,000,000,000,000\n1 billion miles = 1.08 x 10^17 miles\n1 billion orbits = 2.18 x 10^27 miles traveled / 1.08 x 10^17 miles = 2.01 x 10^10 orbits\n1 billion orbits = 4.7 billion orbits since the beginning of life", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "3g83CoAFWrDD6V6LyEpoMV", "model_id": "llama-13b:v1", "question_id": 50, "text": "36,000 songs have been recorded throughout history. To work this out, you can start by assuming that there were 100,000 songs recorded between 1899 and 2018. This means that there were 1.87 songs recorded per day on average. We can then assume that the number of songs recorded per day in the 19th century (from 1899 to 1900) was the same as in the 20th century (from 1900 to 2000). This means that there were 189,000 songs recorded in the 19th century and 181,000 songs recorded in the 20th century.\nHence, there were 36,000 songs recorded throughout history.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "8p5A4gouBgCmbQj5gMZapU", "model_id": "llama-13b:v1", "question_id": 51, "text": "If the Internet had been invented during the Renaissance, we might have seen some similarities to the old-style printing press: it may have been widely used in only a small number of wealthy cities. Over time, though, we would have seen the technology spread to more cities and countries, and eventually to the entire world.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "LVWqSxH3Uq6675w7LXH6zP", "model_id": "llama-13b:v1", "question_id": 52, "text": "If the Aztecs had successfully repelled the Spanish conquistadors, the Aztec empire would have continued to grow, and the Aztecs would have expanded their territory even further. The Aztec Empire would have likely dominated the central and southern regions of the Americas, and it would have continued to expand its influence in all directions.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "ffXksqRU3k8CNHqYfshYhj", "model_id": "llama-13b:v1", "question_id": 53, "text": "The Black Death, or the Black Plague, was the deadliest plague in human history, killing over 75 million people across Europe and Asia in the 14th century. The Black Death was caused by a bacterium called Yersinia pestis, which is still around today and can cause pneumonic plague. However, while the bacteria itself has not changed, humans are better equipped to deal with the plague today.\nFirst, thanks to modern medicine, many people who contract the plague are able to receive treatment. Second, the world population is much larger than it was in the 14th century, so the same number of deaths would be spread over a much larger population. As a result, the death toll would be much lower today.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "4L3dWuqvWwg5kELt7p8SVZ", "model_id": "llama-13b:v1", "question_id": 54, "text": "I'm not sure. If he did so, he wouldn't have discovered the law of gravity.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "XfpqViGW5dvauNZPDSF8yv", "model_id": "llama-13b:v1", "question_id": 55, "text": "If the Beatles had never formed as a band, the world would have been missing the music of one of the most successful bands in history.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "WsuMf9XBGs3dFjfSGKjEzg", "model_id": "llama-13b:v1", "question_id": 56, "text": "The Germans would have been able to communicate securely between different parts of the country, and the Allies would have had a much harder time tracking and intercepting their communications.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "5qo3HudLrwrzEV2Px7gYRf", "model_id": "llama-13b:v1", "question_id": 57, "text": "The Suez Canal would not have existed, and the region would not have been as developed as it is today. Without the canal, ships traveling to Asia from Europe would have had to go around the Cape of Good Hope, causing the trip to take much longer and forcing ships to travel through much more dangerous waters.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "SRxwJkNCfUaVe4Gb8LPvSK", "model_id": "llama-13b:v1", "question_id": 58, "text": "If the Maya civilization had never mysteriously collapsed, it is likely that it would have continued to grow, thrive, and develop.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "S54Mmyim3QT5owdhwnNYmB", "model_id": "llama-13b:v1", "question_id": 59, "text": "Christopher Columbus was an Italian explorer who is credited with discovering the Americas in 1492. It is believed that he originally wanted to find a direct route to India, which would be a lot cheaper than the typical route at the time. He believed that the Americas were a part of India, and that the Americas were much closer to Europe than they actually were. When Columbus sailed across the Atlantic Ocean in 1492, he actually landed somewhere in the Bahamas, and not in India. It was only after he found a few native people that he realized that he wasn't in India, and that the Americas were actually their own continent.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "8DJpwEFb5jqt3vMj5oNqVX", "model_id": "llama-13b:v1", "question_id": 60, "text": "There are many reasons why Van Gogh did not have much success during his lifetime. One of the reasons is that he was a very poor businessman. He was very generous and gave away many of his paintings as presents. He also had a tendency to drink too much. He spent some time in a mental asylum and died at the age of 37.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "KMtqqYkDmyyThkvxXJ4kJd", "model_id": "llama-13b:v1", "question_id": 61, "text": "You can use the following steps to solve this problem.\n1. Create an empty C++ program.\n2. Include the header to use the std::cin and std::cout functions.\n3. Open the text file using the fstream class.\n4. Read a single line from the file using the getline function.\n5. Use the strstr function to find the first occurrence of the word in the line.\n6. Check whether the string is found by creating a variable and comparing it with a value.\n7. Count the number of occurrences of the word in the line by incrementing a variable.\n8. Close the file.\n9. Print the number of occurrences of the word.\n10. Output the file name to the console.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "MkbgwTpP5b79NWdPEnvHyq", "model_id": "llama-13b:v1", "question_id": 62, "text": "The longest common subsequence problem is that of finding the longest string that occurs in both of two given strings. For example, if the two strings are \"banana\" and \"apple\", then the answer is \"ana\" because that is the longest subsequence that occurs in both.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "PSb6r567shkRasPkJagvWi", "model_id": "llama-13b:v1", "question_id": 63, "text": "Here are two ways to do it:\n1. You can use the function re.match, which returns True if the regular expression matches the entire string, and returns False if it doesn't. So, we can do something like this:\nif re.match('^[a-z0-9]+(\\.[a-z0-9]+)*@[a-z0-9]+(\\.[a-z0-9]+)*(\\.[a-z]{2,4})$', email):\nprint('It is a valid email')\nprint('It is not a valid email')\n2. You can use the function re.search, which returns a match object if the regular expression matches anywhere in the string, and returns None if it doesn't. So, we can do something like this:\nif re.search('^[a-z0-9]+(\\.[a-z0-9]+)*@[a-z0-9]+(\\.[a-z0-9]+)*(\\.[a-z]{2,4})$', email):", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "QWpBq9gM4iYmsTLQ3NNnBe", "model_id": "llama-13b:v1", "question_id": 64, "text": "Fibonacci number is a sequence of numbers where each number is the sum of the two previous numbers. The first two numbers in the sequence are 0 and 1. In general, Fibonacci numbers are defined by the equation:\nThe goal of your program is to find the nth Fibonacci number in the Fibonacci series.\nNote: n will be a positive integer.\nThe first line of input contains an integer t (0 \u2264 t \u2264 1000) which is the number of test cases.\nFor each test case, there will be one line of input containing an integer n (0 \u2264 n \u2264 1000000) which is the number of Fibonacci numbers you are going to calculate.\nPrint each Fibonacci number in a new line.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "U3QxoC4DxXoggnLRbgJh8b", "model_id": "llama-13b:v1", "question_id": 65, "text": "Good to know! I will implement a binary search algorithm to find a specific element in a sorted array.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "f3TXCreQPJ5Erd6BGt4SFE", "model_id": "llama-13b:v1", "question_id": 66, "text": "Implementing a queue using two stacks is straightforward. Take two stacks and create a third stack. First, push the first element of the first stack onto the third stack. Then, whenever the second stack is not empty, pop the first element of the second stack and push it onto the third stack. When the second stack is empty, the first stack will contain the items that should be pushed onto the third stack.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "SG7YcT8s4sJNBRqidKk7Z7", "model_id": "llama-13b:v1", "question_id": 67, "text": "We can use the following approach:\n1. Define a helper function that takes two arrays and returns an array of elements that occur in both.\n2. Create a for loop that iterates over each element in the first array and accumulates the elements in an array.\n3. Call the helper function in each iteration.\n4. Return the accumulated array.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "ENsG3zoeUY7ArFo2kNaQbz", "model_id": "llama-13b:v1", "question_id": 68, "text": "f(2) = 26", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "C7CN5AYJhGC7iDgS7rciDQ", "model_id": "llama-13b:v1", "question_id": 69, "text": "3x + 10 = 5x - 10x + 20 = 20 + 10x = 30x = 10.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "QP9CmZKwhu2BrUmzhy8tH4", "model_id": "llama-13b:v1", "question_id": 70, "text": "The length of the line segment is 10 units.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "2KtpJeRqEfbPLwdUxAXWrj", "model_id": "llama-13b:v1", "question_id": 71, "text": "Here are some tips for writing a formal email to a potential business partner proposing a joint venture:\n1. Be clear and concise\n2. Use formal language\n3. Use a proper greeting\n4. Use a proper closing\n5. Use proper punctuation\n6. Be sure to include all relevant information", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "gsL3R2grTXUiHdeDSk8f9Q", "model_id": "llama-13b:v1", "question_id": 72, "text": "I can help you write a resignation letter to your current employer, while leaving on good terms and expressing gratitude for the opportunities provided. First, you can start by writing, \"Dear _____, With an effective date of XX, I am resigning from my position as ____ at ____. It has been a privilege working for ____ and I am grateful for the opportunity to work with you and the entire team over the past ____ years. Thank you for your support and guidance during my time with _____.\"", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "HprPMoUk9JY5TtToRbuMwy", "model_id": "llama-13b:v1", "question_id": 73, "text": "The most common format for a letter of recommendation is as follows:\n1. Salutation: In the salutation, address the recipient as Mr., Mrs., Ms., or Dr.\n2. Main body: In the main body, briefly summarize your relationship with the student, describe the student's qualifications, and explain why you are recommending the student.\n3. Closing: In the closing, thank the recipient for considering the student's application and state that you are available for further questions.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "YU7gDhmo4LDVMTEZZMRdBC", "model_id": "llama-13b:v1", "question_id": 74, "text": "", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "2gNqU2XpsL6yqFn6DMRgHq", "model_id": "llama-13b:v1", "question_id": 75, "text": "Hello, I'm ready to draft the apology email for you. Please tell me the name of the customer you'd like to send this email to.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "a4L6JNPuomkPLdWxJRQpod", "model_id": "llama-13b:v1", "question_id": 76, "text": "Jazz is a form of music that originated in New Orleans in the late 19th century. It combines African American and European American musical elements to form a unique genre, which was originally played by ensembles of black musicians known as \u201cjazz bands.\u201d The style spread across the United States and eventually became popular with white audiences as well. Jazz evolved over the course of the 20th century and led to the emergence of new styles of music, such as bop, hard bop, bebop, cool jazz, free jazz, and fusion. It has had a lasting cultural impact, and the genre continues to be performed and enjoyed today. Here are some key facts about jazz:\n1. Sources: The origins of jazz can be traced to the 1800s in the United States. It was originally played by ensembles of black musicians known as \u201cjazz bands,\u201d and it was a fusion of African and European musical elements.\n2. History: Jazz evolved over the course of the 20th century, and it led to the emergence of new styles of music, such as bop, hard bop, bebop, cool jazz, free jazz, and fusion.\n3. Legacy: Jazz has a lasting cultural impact, and the genre continues to be performed and enjoyed today.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "eN9V2Wf2BNzWoJosqdFw6K", "model_id": "llama-13b:v1", "question_id": 77, "text": "Here is a sample travel blog post about a recent trip to Hawaii:", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "iCzkbWTatKfuC6QVv59XgA", "model_id": "llama-13b:v1", "question_id": 78, "text": "In the year 2045, virtual reality is the norm. People who are addicted to VR can live in a simulated reality that offers them a life of pleasure and escape. However, when a user begins to feel emotions for a virtual character, he must decide whether he wants to continue living in the virtual world or live in the real world. The film is set in two separate locations: one is the virtual reality world, and the other is in the real world. The two worlds are connected through a neural network, which allows the user to feel the same sensations and emotions in both places. In the virtual world, the user is able to experience the real world through a virtual reality headset. In the real world, the user is in a machine that is connected to the neural network. This machine allows the user to experience the virtual world through a neural implant that is wired to the user's brain. The film's plot centers around the user and the virtual character. The characters in the virtual world are all artificial intelligence entities, and the user's experience in the real world is with two characters, his friend and his girlfriend. The user is a young man in the film, and the virtual character is a young woman. The two characters have a virtual relationship, and then the virtual character is killed by a bug in the system. This causes the user to fall into a deep depression, and he begins to question his own reality. He decides to go to the real world to get treatment for his depression. In the real world, the user has to decide whether he wants to stay in the real world or return to the virtual world. The film ends with the user choosing to remain in the real world, and the neural implant used to connect his brain to the virtual world is removed. The movie is based on the novel \"Ready Player One\" by Ernest Cline. The film is directed by Steven Spielberg and stars Tye Sheridan as the main actor. The film is a science fiction adventure and a romantic drama. The film's special effects are excellent, and the visuals are beautiful. The film's soundtrack is also quite good. The film has received positive reviews from critics, and it has been nominated for a number of awards, including the Academy Award for Best Visual Effects. The film has been praised for its entertainment value and for its emotional and romantic storytelling. The film has been compared to the popular sci-fi film \"The Matrix.\" Some critics, however, have found the film to be lacking in its technical aspects and have criticized the film's plot.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "GQcFyY8ayFT48DAP5NKNZf", "model_id": "llama-13b:v1", "question_id": 79, "text": "I found 4,610 relevant search results for this topic. The results include:\n1. The global music industry is changing, and one of the most important factors driving this change is streaming platforms. Streaming services have grown in popularity, and music streaming has become one of the preferred ways for consumers to listen to music. What exactly is streaming?\n2. Streaming means that music is delivered over the internet. Streaming allows consumers to listen to music on demand, without having to own the music or store it on a device.\n3. Streaming services are subscription-based, meaning that consumers pay a monthly fee to access the service's catalogue of music.\n4. Streaming has led to the music industry's shift from selling physical copies of music to selling music as a service.\n5. Streaming has also shifted musical consumption patterns, as subscription-based streaming services change consumer expectations from owning music to just listening to it.\n6. Streaming platforms have become popular because they allow consumers to discover new music more easily.\n7. Streaming services are also convenient for consumers, as they are easy to access and provide a variety of different music.\n8. Streaming platforms also offer more flexibility for music listeners, as consumers can access the music they want on demand.\n9. Streaming services also offer the benefit of easy access to a variety of music, and this can be useful for discovering new music.\n10. Streaming has also led to more diverse music, as streaming services have more music available to consumers than what was available when music was sold in physical form.\n11. Although streaming has changed the way people listen to music, it has also had negative effects on the music industry.\n12. Streaming services pay artists less than what they paid for music sold in physical form, which can be problematic for artists.\n13. Streaming services also pay less to the music industry overall.\n14. Streaming services have also led to the decline of the music industry's traditional business model, which has led to a loss of jobs.\n15. Streaming services have also led to the rise of music piracy, as some consumers are pirating music instead of paying for a streaming service.\n16. Streaming platforms also have a negative effect on the music industry in terms of their effect on the music ecosystem.\n17. Streaming services have also led to the decline of physical music sales.\n18. Streaming platforms have also led to the decline of record stores, which can harm artists and consumers.\n19. Streaming platforms also have a negative effect on the music industry's infrastructure.\n20. Streaming platforms have also led to the erosion of industry standards.\n21. Streaming platforms have also led to a decline in music videos and music videos being less accessible.\n22. Streaming platforms have also led to the decline of radio, which can be problematic for listeners.\n23. Streaming platforms have also led to a decline in music journalism.\n24. Streaming platforms have also led to a decline in the number of record labels.\n25. Streaming platforms have also led to a decline in the number of music producers.\n26. Streaming platforms have also led to a decline in the music industry's contribution to the economy.\n27. Streaming platforms have also led to the decline of music festivals.\n28. Streaming platforms have also led to the decline of music labels.\n29. Streaming platforms have also led to a decline in the music industry's culture.\n30. Streaming platforms have also led to a decline in the music industry's diversity.\n31. Streaming platforms have also led to the decline of the music industry's talent development.\n32. Streaming platforms have also led to a decline in the music industry's competitiveness.\n33. Streaming platforms have also led to the decline of the music industry's social and economic effects.\n34. Streaming platforms have also led to the decline of the music industry's intellectual property.\n35. Streaming platforms have also led to the decline of the music industry's political and social engagement.\n36. Streaming platforms have also led to the decline of the music industry's cultural and artistic output.\n37. Streaming platforms have also led to the decline of the music industry's public value.\n38. Although streaming has had a negative effect on the music industry, it has also had positive effects.\n39. Streaming platforms have", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "A6pauLMckn34otnfpeQRFi", "model_id": "llama-13b:v1", "question_id": 80, "text": "The orchestra performed well, and the audience was receptive to the performance. The orchestra played a selection of pieces from some of the most well-known classical composers, including Beethoven, Mozart, and Tchaikovsky. The orchestra played in unison with each other, and the conductor led them through the pieces without much difficulty. The orchestra played with a lot of energy and spirit. The audience was appreciative and applauded at the appropriate times. The performance lasted 90 minutes, and it was well worth the price of admission. I would recommend the concert to anyone who wants to experience beautiful classical music.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
diff --git a/model/fastchat/eval/table/answer/answer_vicuna-13b-20230322-new-hp-fp16.jsonl b/model/fastchat/eval/table/answer/answer_vicuna-13b-20230322-new-hp-fp16.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..fd1f3a8ee5f171278670fa0c988115be901688b8
--- /dev/null
+++ b/model/fastchat/eval/table/answer/answer_vicuna-13b-20230322-new-hp-fp16.jsonl
@@ -0,0 +1,80 @@
+{"question_id": 1, "text": "Improving your time management skills can help you to prioritize your tasks, stay organized, and be more productive. Here are some tips to help you improve your time management skills:\n1. Set goals: Start by setting clear, specific, and measurable goals for yourself. This will help you to stay focused and motivated.\n2. Prioritize: Identify which tasks are most important and focus on them first.\n3. Use a planner: Keep track of your tasks and deadlines in a planner or calendar.\n4. Break down tasks: Break down larger tasks into smaller, manageable chunks.\n5. Eliminate distractions: Minimize distractions, such as social media, email, and phone notifications, while you work on important tasks.\n6. Take breaks: Take regular breaks to rest and recharge.\n7. Focus on one thing at a time: Try to focus on one task at a time, and avoid multitasking.\n8. Be mindful of time-wasting activities: Be mindful of time-wasting activities and try to minimize them.\n9. Review your progress: Review your progress periodically, and adjust your strategy as needed.\n10. Practice, practice, practice: Finally, practice these skills regularly, and be patient with yourself as you work to improve them.", "answer_id": "FNAYdXQwcrNkTD4nCqdFGb", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 2, "text": "Stress is a natural response to pressures in life, but chronic stress can have negative impacts on mental and physical health. Here are some effective ways to deal with stress:\n1. Exercise: Regular physical activity can help reduce stress levels and improve mood.\n2. Relaxation techniques: Meditation, yoga, deep breathing, and other relaxation techniques can help calm the mind and body.\n3. Social support: Spending time with friends and family or talking to a trusted confidant can help reduce stress levels.\n4. Sleep: Getting enough restful sleep can help the body and mind rejuvenate and better cope with stress.\n5. Healthy diet: Eating a balanced diet with plenty of fruits, vegetables, and whole grains can help support good physical and mental health.\n6. Goal-setting: Setting realistic goals and working towards them can help give a sense of accomplishment and control over stressors.\n7. Time management: prioritizing tasks and setting aside time for leisure activities can help reduce feelings of being overwhelmed.\n8. Avoidance: Avoiding unhealthy habits such as smoking, excessive alcohol consumption, and drug use can help reduce stress levels.\n9. Seek professional help: If stress is impacting daily life, it may be helpful to seek professional help from a therapist or counselor.\nIt's important to find what works best for you and make stress management a regular part of your routine.", "answer_id": "7SzYV3TjzyrVkSKKGaXN4c", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 3, "text": "Python and JavaScript are two popular programming languages used for different purposes. Here are some of the main differences between Python and JavaScript:\n1. Paradigm: Python is a procedural language, while JavaScript is an object-oriented language.\n2. Applications: Python is used for web development, data science, artificial intelligence, scientific computing, and more, while JavaScript is used for front-end web development, back-end web development, and mobile app development.\n3. Readability: Python code is generally more readable and easier to understand than JavaScript code, due to its use of whitespace and indentation to delimit blocks of code.\n4. Speed: JavaScript is generally faster than Python, due to its dynamic nature and JIT (Just-In-Time) compilation.\n5. Interpreted vs. compiled: Python is an interpreted language, while JavaScript is a compiled language.\n6. Standard libraries: Python has a large standard library, while JavaScript has a smaller standard library.\n7. Community: Python has a larger community than JavaScript, with more resources and third-party libraries available.\n8. Debugging: Python has an interactive shell that allows for easy debugging, while JavaScript has a debugger built into the browser.\n9. Deployment: Deploying Python applications is generally easier than deploying JavaScript applications, as Python applications can be run directly from the command line, while JavaScript applications require a web server.", "answer_id": "cA7LoXTBCgegnTcKWpjxQq", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 4, "text": "Working from home can be challenging because of the many distractions that abound, from household chores to Netflix marathons. However, there are several tips you can follow to increase your productivity while working from home:\n1. Create a designated workspace: Set up a designated workspace in your home that is free from distractions and clutter. This can be a spare room, a desk in a corner, or even a local coffee shop.\n2. Establish a schedule: Set a schedule for your workday and stick to it. This will help you stay on track and avoid wasting time.\n3. Eliminate distractions: Turn off notifications on your phone and computer, close unnecessary tabs, and try to minimize background noise.\n4. Take breaks: Take regular breaks to refresh your mind and body, but limit them to 10-15 minutes.\n5. Prioritize your tasks: Prioritize your tasks based on importance and urgency, and tackle them one at a time.\n6. Dress professionally: Dressing professionally can help you stay focused and motivated.\n7. Communicate with your team: Keep your team updated on your progress and communicate regularly to avoid misunderstandings.\n8. Exercise: Exercise can help you stay alert and focused, so try to incorporate physical activity into your day.\n9. Have a healthy diet: Maintaining a healthy diet can help you stay energized and productive throughout the day.\n10. Take care of your mental health: Finally, take care of your mental health by practicing mindfulness, setting boundaries, and taking time for self-care.\nBy following these tips, you can increase your productivity while working from home and achieve your goals more effectively.", "answer_id": "8Yo5UVMPzoxjCsi3fuHNmH", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 5, "text": "Quantum computing is a form of computing that uses quantum-mechanical phenomena, such as superposition and entanglement, to perform operations on data. Unlike classical computers, which store and process data using bits that are either 0 or 1, quantum computers use quantum bits, or qubits, which can be 0, 1, or both at the same time.\nThis ability to be in multiple states at the same time, known as superposition, allows quantum computers to perform certain calculations much faster than classical computers. In addition, quantum computers can use a phenomenon called entanglement, which allows qubits to be linked together in such a way that the state of one qubit affects the state of another, even if they are physically separated.\nThese quantum mechanical properties enable quantum computers to perform certain calculations that are impractical or even impossible for classical computers, such as simulating the behavior of complex molecules or factorizing large numbers.\nQuantum computers are still in the early stages of development and are not yet widely available. However, they have the potential to revolutionize fields such as cryptography, chemistry, and artificial intelligence, and researchers around the world are working on improving their performance and scalability.", "answer_id": "cCc7EgnQNgcVAVqk96Pyto", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 6, "text": "Plant-based protein sources come from plants, while animal-based protein sources come from animals. Here are some key differences between the two:\n1. Nutritional content: Plant-based protein sources tend to be lower in some essential amino acids, while animal-based sources tend to be higher. However, many plant-based sources can be eaten in combination to create a complete protein.\n2. Sustainability: Plant-based protein sources are generally considered to be more sustainable than animal-based sources, as they require fewer resources to produce and have a lower carbon footprint.\n3. Health benefits: Some plant-based protein sources, such as legumes, nuts, and whole grains, have been shown to have health benefits such as lowering the risk of heart disease, stroke, and certain types of cancer.\n4. Ethical considerations: Many people choose plant-based protein sources for ethical reasons, such as a concern for animal welfare or a belief that animal agriculture is unsustainable.\n5. Taste: Some people prefer the taste of animal-based protein sources, while others prefer the taste of plant-based sources.\n6. Allergies: Some people have food allergies or intolerances to certain animal-based protein sources, such as milk, eggs, or shellfish, while plant-based sources are generally considered safer for those with food allergies.", "answer_id": "TUL5M2TWXQmM56U4Ckk4s4", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 7, "text": "Developing your critical thinking skills can be challenging, but there are several things you can do to improve them:\n1. Practice questioning: Start by asking questions about the information you encounter. Ask yourself what the information means, how it was gathered, who funded it, and if there are any biases.\n2. Analyze and evaluate: Look for patterns, connections, and underlying assumptions in the information you receive. Evaluate the strengths and weaknesses of arguments, and consider alternative viewpoints.\n3. Seek out diverse perspectives: Expose yourself to a variety of viewpoints and ideas, and try to understand where they are coming from. This will help you to better understand the strengths and weaknesses of different arguments.\n4. Read, watch, and listen actively: engage with the information you consume, and take notes on what you read, watch, or listen to. Ask yourself questions about what you just consumed and look for connections.\n5. Learn from experts: Seek out experts in fields that interest you, and pay attention to how they approach problems and make decisions.\n6. Practice self-reflection: Regularly reflect on your own thought processes, and consider how you can improve your critical thinking skills.\n7. Practice, Practice, Practice: Consciously try to apply critical thinking in your daily life, in your work, in your conversations and in your decision making process.\nRemember that developing critical thinking skills takes time and practice, so be patient with yourself and stay committed to your goal.", "answer_id": "iZUdysQf69MHQE9Lq76mii", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 8, "text": "The education sector faces many challenges today, including:\n1. Access: Many people around the world still lack access to education, particularly those in remote or disadvantaged areas.\n2. Quality: education: Despite efforts to improve quality, some schools still struggle to provide students with a high-quality education.\n3. Funding: budget : education budgets are often inadequate to cover the costs of providing education, particularly in developing countries.\n4. Technology: integration : the rapid pace of technological change has led to questions about how best to integrate technology into education.\n5. Accountability: Schools are often held accountable for student performance, but there is ongoing debate about how best to measure and improve accountability.\n6. Inclusion : of students with disabilities and other special needs: ensuring that all students have equal access to education and are able to succeed.\n7. Safety : school safety has become an increasing concern in recent years, particularly in light of the rise of school shootings.\n8. Employability : the education system is not always keeping up with the changing demands of the job market, leading to concerns about student employability.\n9. Globalization : The increasing globalization of education is leading to debates about how best to ensure that education prepares students for a globalized world.\n10. Teacher shortages : Some countries are experiencing shortages of qualified teachers, leading to concerns about the quality of education.", "answer_id": "D8EWgfq9yytMgQYybq3Rhq", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 9, "text": "There are several factors that can influence consumer behavior. Some of the most important factors include:\n1. Personal factors: These include demographic characteristics such as age, gender, income, education level, and lifestyle.\n2. Psychological factors: These include attitudes, personality, interests, values, beliefs, and feelings towards a product or service.\n3. Social factors: These include peer pressure, family influence, culture, and subculture.\n4. Economic factors: These include income, inflation, interest rates, and the availability of credit.\n5.", "answer_id": "5bzosDZ96t2p5qxeS7VAeA", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 10, "text": "Conflict resolution in the workplace is an important aspect of maintaining a healthy and productive work environment. Here are some effective strategies for conflict resolution in the workplace:\n1. Active Listening: Listen attentively to both sides of the argument and try to understand their perspectives.\n2. Encourage Open Communication: Encourage open and honest communication between parties to help them express their concerns and feelings.\n3. Focus on Interests: Try to understand the underlying interests of each party involved in the conflict, rather than just their positions.\n4. Brainstorm Solutions: Work with each party to brainstorm and considering different solutions that could satisfy everyone's interests.\n5. Compromise: Seek a mutually acceptable solution that meets the needs of both parties.\n6. Mediation: Consider using a neutral third party mediator to help facilitate communication and find a resolution.\n7. Conflict Resolution Training: Provide training for employees on conflict resolution skills, such as effective communication, empathy, and negotiation.\n8. Establish Ground Rules: Establish clear ground rules for communication and conflict resolution within the workplace.\n9. Document Everything: Document all incidents and communication related to the conflict to help with any potential future resolution.\n10. Follow Up: Follow up with parties after a resolution has been reached to ensure that everyone is satisfied and the issue has been resolved.", "answer_id": "itBobRwhSZUnsWkUs5bVwX", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 11, "text": "The use of single-use plastic bottles has significant environmental and health implications, both in terms of the production and disposal of these bottles. Here are some of the potential implications:\n1. Environmental Impact: Single-use plastic bottles are made from non-renewable resources, such as crude oil and natural gas, and their production emits greenhouse gases. They also create a significant amount of waste, which can contribute to plastic pollution in oceans, rivers, and other bodies of water. Plastic debris can harm marine life, and the toxic chemicals found in plastic can also leach into soil and groundwater.\n2. Human Health: The use of single-use plastic bottles can also have negative impacts on human health. Plastic chemicals can leach into drinks, and BPA, a chemical used in some plastic bottles, has been linked to a variety of health issues such as reproductive disorders, heart disease, and diabetes. Reusing plastic bottles can also lead to bacterial growth, which can contaminate drinking water.\n3. Water Quality: Single-use plastic bottles can also have an impact on water quality. In some areas, the source water used to fill plastic bottles may not meet quality standards for human consumption. In addition, plastic bottles can contaminate drinking water supplies, either through poor storage or disposal practices.\n4. Cost: Reusable bottles, on the other hand, have a much lower environmental impact, as they are made from materials that can be reused and recycled. They can also help individuals save money in the long run, as they do not have to buy single-use plastic bottles repeatedly.\n Overall, using single-use plastic bottles over reusable bottles not only harms the environment but also poses health risks to individuals. Reusing plastic bottles has many benefits, including saving money and reducing waste and pollution.", "answer_id": "iVTKdmUeTySryqEHhtgmkS", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 12, "text": "When designing an inclusive and accessible public transportation system, there are several factors to consider:\n1. Universal accessibility: This includes features such as ramps, lifts, and low-floor buses and trains to ensure that all passengers can access the vehicles easily, regardless of their mobility level.\n2. Diversity of Needs: It is important to cater to the diverse needs of passengers, including those with visual impairments, hearing impairments, and mobility impairments. This can be achieved through features such as Braille signage, audible announcements, and priority seating.\n3. Multi-modal options: Offering multiple modes of transportation, such as buses, trains, and light rail, can provide more options for passengers and make it easier for them to travel.\n4. Frequency and reliability: A public transportation system that is frequent and reliable can encourage more people to use it, especially those who may not be able to afford their own transportation.\n5. Safety: Safety features such as CCTV cameras, emergency call buttons, and uniformed personnel can help make passengers feel more secure while using public transportation.\n6. Information and communication: Providing clear and accurate information about schedules, routes, and fares can help passengers make informed choices and plan their trips more effectively.\n7. User-friendly fare collection: A user-friendly fare collection system can make it easier for passengers to use public transportation and reduce boarding times.\n8. Integration with other modes: Integrating public transportation with other modes, such as biking and walking, can provide more options and make it easier for passengers to reach their final destination.\n9. Community engagement: Community engagement is crucial to ensure that the public transportation system meets the needs of the community it serves.\n10. Inclusive pricing: An inclusive pricing policy can make public transportation more accessible to everyone, regardless of their financial means.\nThese are just some of the factors to consider when designing an inclusive and accessible public transportation system.", "answer_id": "7UgtosTTGx7bnRcQGxdDJo", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 13, "text": "Fiscal policy refers to government spending and taxation policies that can be used to influence the economy. Monetary policy refers to the actions of central banks, such as the Federal Reserve, to control the money supply and interest rates. Here are some ways that governments can use these policies to combat economic recessions:\n1. Fiscal policy: Governments can use expansionary fiscal policy to stimulate economic growth during a recession. This can include increasing government spending on public works projects, providing tax cuts for individuals and businesses, and lowering taxes to stimulate borrowing and spending.\n2. Monetary policy: Central banks can use expansionary monetary policy to increase the money supply and lower interest rates. This can encourage borrowing and spending, which can stimulate economic growth.\n3. Fiscal policy: Governments can also use automatic stabilizers, which are programs that automatically adjust during an economic downturn. For example, unemployment benefits and food stamps can help support individuals and families during a recession.\n4. Monetary policy: Central banks can also use discretionary policy, which is when they use the tools at their disposal, such as setting interest rates or buying bonds, to try to influence the economy.\n5. Fiscal policy: Governments can also use the use of deficit-financed fiscal stimulus, which is when governments run budget deficits to finance spending and tax cuts to stimulate economic growth.\nIt's important to note that these policies should be used with caution, as they can also lead to inflation and other negative side effects if not used correctly.", "answer_id": "Yt5tqBrrKfcumuuuxf2qqM", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 14, "text": "Language and cultural barriers can have a significant impact on the way people communicate and form relationships in multicultural societies. These barriers can lead to misunderstandings, misinterpretations, and stereotypes, and can make it more difficult for people from different cultures to connect and form meaningful relationships.\nLanguage barriers can make it difficult for individuals to understand and be understood, leading to communication breakdowns and isolation. cultural barriers can make it difficult for individuals to understand and appreciate the beliefs, values, and customs of other cultures, leading to misunderstandings and mistrust.\nBoth language and cultural barriers can also perpetuate stereotypes and prejudices, as individuals may make judgments about others based on limited exposure or understanding of their culture.\nIn order to overcome these barriers, it is important for individuals to actively engage in cross-cultural communication and to be open to learning about other cultures. This can include seeking out language learning opportunities, participating in cultural events and activities, and actively seeking out diverse perspectives and viewpoints.", "answer_id": "4pZ4Uy544Bc3K59fhbW7xj", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 15, "text": "Artificial intelligence (AI) has the potential to revolutionize healthcare by improving the quality and efficiency of healthcare delivery. One such scenario is the use of AI to assist with diagnosis and treatment planning.\nAn AI system could be trained on a large database of medical records and images, allowing it to quickly and accurately diagnose common medical conditions. This would save time and resources for healthcare providers, who could then focus on more complex cases.\nAnother application of AI in healthcare is the use of predictive analytics to identify patients who are at risk of developing certain conditions. For example, an AI system could analyze patient data to identify individuals who are at high risk of developing diabetes, allowing healthcare providers to intervene early and prevent the onset of the disease.\nAI can also be used to streamline administrative tasks, such as scheduling appointments and managing patient records. This would free up healthcare providers to spend more time caring for patients, improving patient satisfaction and outcomes.\nIn addition, AI could be used to personalize patient care, tailoring treatments to an individual's specific needs and preferences. This could be done by analyzing a patient's genetic profile, medical history, and other factors to determine the most effective course of treatment.\nOverall, the use of AI in healthcare has the potential to improve patient outcomes while also making healthcare delivery more efficient and cost-effective.", "answer_id": "762peC97upw58WFQeLNoXZ", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 16, "text": "CRISPR-Cas9 is a revolutionary gene editing tool that allows scientists to make precise changes to DNA sequences. The technology is based on the way bacteria defend themselves against viruses, by using a Cas9 protein to cut DNA at a specific location. The CRISPR-Cas9 system consists of a guide RNA that directs the Cas9 protein to the desired location in the genome, and a repair template that helps guide the process of repairing the DNA break.\nHere's how the process works:\n1. The CRISPR-Cas9 system is introduced into the cell, either through viral delivery or direct transfection.\n2. The guide RNA locates the specific DNA sequence that needs to be edited.\n3. Cas9 cuts the DNA at the chosen location.\n4. The cell's own repair machinery then repairs the DNA break. Scientists can provide a repair template, which can either disrupt the function of the gene being edited, or introduce a new mutation.\nThe potential applications of CRISPR-Cas9 technology are vast, including treating genetic diseases, understanding gene function, and creating genetically modified organisms (GMOs). For example, CRISPR-Cas9 could be used to treat genetic diseases by editing out harmful mutations, and to create GMOs that are resistant to certain viruses or have improved nutritional value.\nHowever, CRISPR-Cas9 technology raises ethical concerns, as gene editing can have long-term effects on future generations and could be used for unethical purposes, such as \"designer babies.\" It's important that gene editing is done responsibly and with caution, and that appropriate oversight and regulations are in place.", "answer_id": "Yqfg2saKSNPauCS8YdsjdD", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 17, "text": "Vaccinations work by introducing a harmless piece of a virus or bacteria to the body, which prompts the immune system to produce antibodies to fight the invader. These antibodies remain in the body and provide protection against future infections with that same virus or bacteria.\nHerd immunity is a concept in which a critical mass of individuals within a population are immune to a particular disease, either through vaccination or naturally acquired immunity, so that the disease cannot spread effectively through the community. This is because when a sufficient proportion of people are immune, the virus or bacteria cannot find enough susceptible individuals to infect, and the disease dies out.\nHerd immunity can be achieved either naturally, through the spread of immunity from person to person, or through vaccination, which artificially boosts immunity levels. The level of herd immunity required to protect a community depends on the infectiousness of the disease and the susceptibility of the population.\nIt's important to note that not everyone can be immunized, such as infants who are too young to be vaccinated, people with compromised immune systems, and some individuals who experience side effects from vaccines. Herd immunity provides protection to these individuals who are not able to be vaccinated, as well as to the broader community.", "answer_id": "gKd2BPWp7HG9PTDdgS7HT8", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 18, "text": "Social media platforms have had a significant impact on the way people consume and share news. They have made it easier for people to access news from a variety of sources and to share news stories with their social networks. This has led to a greater spread of information and has made it easier for news organizations to reach larger audiences.\n\nHowever, social media platforms have also made it easier for misinformation to spread. Misinformation can range from simple mistakes to deliberate falsehoods spread for political or other purposes. The speed and ease with which information can be shared on social media can make it difficult to fact-check and debunk false information, leading to its widespread acceptance.\n\nThis has led to concerns about the impact of social media on the integrity of elections and other democratic processes. It has also raised questions about the role of these platforms in shaping public discourse and opinions.\n\nIn order to address these concerns, social media platforms have implemented measures such as fact-checking tools and stricter policing of misinformation. However, the effectiveness of these measures is still a subject of debate, and the role of social media in shaping the news ecosystem continues to be a subject of study and concern.", "answer_id": "CyZSkdkGWmjh8ZgxrQ2f66", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 19, "text": "Cultural, social, and economic factors all play a role in shaping people's food choices. For example, cultural tradition, social norms, and personal preferences can influence what foods people choose to eat, while economic factors such as affordability and availability can also play a role.\n\nTo promote healthier diets, it is important to understand these factors and how they can be leveraged to encourage people to make healthier food choices. For example, education campaigns that celebrate cultural and regional foods can help people associate healthy eating with positive cultural identities and traditions. Social media campaigns that encourage people to share healthy recipes and meal ideas can also help to normalize healthy eating behaviors.\n\nIn addition, it can be helpful to make healthy foods more affordable and widely available, particularly in low-income communities. This can be done through initiatives such as farmer's markets, subsidies for fruits and vegetables, and food education programs.\n\nUltimately, promoting healthy eating requires a holistic approach that considers not just individual factors, but also the broader social and economic context in which people make their food choices. By understanding and addressing these factors, it is possible to encourage people to adopt healthier diets and lead happier, healthier lives.", "answer_id": "LwA42YWwqvdkStLZEUbmm9", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 20, "text": "Natural selection is the process by which organisms with characteristics that are advantageous for survival and reproduction are more likely to pass those characteristics on to their offspring, while those with characteristics that are not as advantageous are less likely to survive and reproduce. Over time, this leads to the accumulation of beneficial traits in a population and the evolution of new species.\nHere's how natural selection contributes to the evolution and adaptation of species:\n1. Variation: Natural selection relies on genetic variation within a population. Without genetic variation, there would be no variation in traits to select from.\n2. Reproduction: Organisms that have advantageous traits are more likely to survive and reproduce, passing those traits on to their offspring.\n3. Survival: Organisms without advantageous traits are less likely to survival, leading to a lower likelihood of reproduction.\n4. Directional change: Over time, natural selection can lead to a gradual directional change in the traits of a population, as beneficial traits become more common and less advantageous traits become rarer.\n5. Speciation: If the process continues long enough, it can lead to the development of new species, with traits that are specifically adapted to their environment.\nNatural selection plays a key role in shaping the traits of organisms and the evolution of new species over long periods of time. It's one of the mechanisms that explains how the diversity of life on Earth has arisen.", "answer_id": "ajfFPpHDdMiSp3PVfsUFbE", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 21, "text": "My lord, My lady, and esteemed guests, I am humbled to stand before you this evening as a humble knight, sworn to serve and protect this fair kingdom. My name is Sir [insert name], and I hail from [insert home town], where I was raised and trained in the ways of chivalry and knighthood. I have been blessed with the honor of wearing this shining armor, and bearing this trusty sword, which I have wielded in defense of our realm and its people. I am grateful for the opportunity to serve His/Her Majesty, and to stand among so many esteemed knights and ladies this evening. May we always remember our duty to uphold the ideals of honor, bravery, and loyalty, and may we continue to defend this great kingdom for generations to come.", "answer_id": "3n8npKQKxgHEbEmf2K6AwF", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 22, "text": "As a pirate captain, I would motivate my crew to search for hidden treasure by reminding them of the rewards that await us if we are successful. I would speak of the riches and power that we could acquire, and how it would make us the envy of every other pirate sailing the seven seas. I would also remind them of the dangers and challenges that we would face on our quest, and how our bravery and determination would make us the strongest and most feared pirate crew on the ocean. I would paint a vivid picture of the glittering prizes that await us, and how our hard work and perseverance will be rewarded tenfold. And, of course, I would remind them of the punishment that would befall any who hesitated or showed cowardice in the face of danger. With these words, I would rally my crew, and set sail with renewed vigor and determination, our eyes fixed on the prize, our hearts pounding with excitement, and our spirits high, ready to conquer the world! Arrr!", "answer_id": "LfeqCy9vR3kCaJiQV4Fyqf", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 23, "text": "As an AI language model, I do not experience emotions or have the capability to love. However, if I were a Shakespearean character, I might deliver a soliloquy something like this:\n\"Oh, fair maiden, with thy radiant smile\nAnd thy eyes like unto emeralds rare,\nMy heart doth beat with love's sweet guile\nFor thee, my own, my love so pure.\n\nThou art the very essence of all that's good\nMy heart doth leap with joy, just to be near\nThy tender touch doth heal my weary soul\nAnd with thy presence, my spirit doth roll\n\nForevermore, let my love be thine\nA bond of love, 'mongst us entwined\nTogether, we shall weather life's storms\nAnd forever, our love shall swell and bloom\n\nThus speaks the heart of thine most humble servant\nPledging his love, his devotion, his allegiance\nMay this love forevermore be entwined\nIn the hearts of thee, my own, my love so rare.\"", "answer_id": "JrnFfmnsuykbTkFbUnei6k", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 24, "text": "As a superhero, my origin story is quite special. It all started when I was just a normal person, living a normal life. But then, one day, I was exposed to a mysterious substance that gave me special abilities.\n\nI discovered that I had incredible strength, agility, and the power of flight. At first, I was scared and didn't know what to do with these abilities. But as I started to explore them more, I realized that I could use them to help others.\n\nI began patrolling the city, looking for ways to use my powers for good. I soon realized that there were many people in need, and I knew that I could make a difference. I started to rescue people from danger, fight against injustice, and protect the innocent.\n\nAs I continued to use my powers for good, I began to realize that I had a bigger purpose. I became a symbol of hope for the people of the city, and they started to look up to me as a hero.\n\nAnd that's how I became a superhero. I may not have been born with these abilities, but I embrace them and use them to make a difference in the world. And I'll continue to use them to protect and help those in need, no matter where life takes me.", "answer_id": "BDBSC5P2DE3E5LuB9FX7tn", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 25, "text": "As a time traveler from the year 3000, I would tell people about many technological advancements that have been made in the future. some of these advancements include:\n\n1. Virtual Reality: Virtual Reality technology has advanced to the point where it is indistinguishable from reality. It has become a ubiquitous form of entertainment, education, and communication.\n2. Artificial Intelligence: AI has become an integral part of daily life, from controlling appliances and vehicles, to assisting with medical diagnoses and research.\n3. space travel: space travel has become much more accessible and affordable, allowing for more frequent missions to space and the exploration of other planets.\n4. Food and Water production: Advanced methods for food and water production have been developed, making it possible to sustainably feed and hydrate the growing population.\n5. Transportation: Transportation has become more efficient and clean, with flying cars and hyperloops becoming common modes of transportation.\n6. Medicine: Advances in medicine have led to longer, healthier lives, with cures for many diseases and illnesses.\n7. Energy: New renewable energy sources have been discovered and developed, making energy more sustainable and clean.\n8. Education: Education has become more personalized and accessible, with technology allowing for more effective and engaging learning experiences.\n9. Climate change: New technologies have been developed to help combat climate change, such as carbon capture, and new forms of energy.\n\nThese are just a few examples of the technological advancements that have been made in the year 3000.", "answer_id": "TS97KLnyki7XWYFdSdbZiY", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 26, "text": "\"And it's a championship game to remember folks! With only seconds left on the clock, the underdogs have managed to take the lead! The ball is thrown deep, and the receiver makes an spectacular catch, dodging defenders all the way to the end zone! Touchdown! The crowd goes wild as the underdogs take the lead with only seconds left on the clock! They've pulled off the impossible, and are the champions of the world! Unbelievable play, unbelievable game! The underdogs have done it! They've done it!!\"", "answer_id": "9ATk4tAxBoe267rQUDrgYD", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 27, "text": "Ladies and Gentlemen, I am honored to be here today representing my signature dish. My dish is a culinary masterpiece that combines the finest ingredients from around the world. It is a fusion of flavors that will tantalize your taste buds and leave you longing for more.\n\nMy dish is a juicy and succulent braised short rib, slow cooked to perfection and infused with aromatic spices that lend a warm and comforting flavor. The short rib is served over a bed of fluffy and fragrant risotto, made with Arborio rice and infused with the creamiest of Stocks. The dish is garnished with baby vegetables, saut\u00e9ed mushrooms, and a reduction of pan juices that add a tangy and savory flavor.\n\nMy dish is more than just a meal, it is a culinary experience that will leave you spellbound. The flavors are bold yet harmonious, and every bite will leave you yearning for more. My dish is a reflection of my passion for cooking and my love for good food. It is a dish that I have perfected over many years of dedicated work, and I am proud to share it with you today.\n\nI hope that you will enjoy my dish as much as I have enjoyed creating it. Thank you for your time and consideration.", "answer_id": "C4zL3pUsGj2Z8U7Tm2nzDN", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 28, "text": "As a language model AI, I do not have emotions but here is how one might describe their emotions and the view from the top of Mount Everest:\nReaching the summit of Mount Everest is a monumental achievement and one of the most thrilling moments of a mountain climber's life. The sense of accomplishment and elation is indescribable, as months or even years of training, preparation, and hard work have led to this moment.\nStanding atop the highest peak in the world, the view is breathtakingly beautiful, with panoramic vistas of snow-capped peaks stretching as far as the eye can see. The clear, crisp air allows for unobstructed views of the surrounding Himalayan ranges, including Mount Lhotse, Mount Makalu, and other nearby peaks.\nThe descent from the summit can be just as challenging as the ascent, but the feeling of satisfaction and fulfillment makes it all worthwhile. The memory of standing atop Mount Everest will remain with a climber forever.", "answer_id": "CTi8ZRuHoAzRQjMRAU3mBN", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 29, "text": "As a space colonist on Mars, my daily life would be very different from what it is on Earth. I would live in a settlement, along with other colonists, and our days would be filled with a variety of tasks and responsibilities.\nOne of the biggest challenges we would face is the harsh Martian environment. The planet has a much thinner atmosphere and lower gravitational pull than Earth, which would make it more difficult to breathe and move around. We would also have to contend with extreme temperatures, radiation exposure, and dust storms.\nTo survive, we would need to rely on a closed-loop life support system, which would recycle water and air, and we would need to be very efficient with our resources. We would also need to be very careful not to contaminate the Martian environment with Earth microbes, which could have adverse effects on the existing Martian microbial life.\nAnother challenge we would face is the isolation and loneliness of living on another planet. It would be difficult to be cut off from our loved ones and Earth, and we would need to have a strong sense of purpose and drive to keep us going.\nOverall, living on Mars would be a challenging and rewarding experience, but it would require a lot of hard work and dedication. We would need to be self-sufficient and resourceful, and we would need to work together as a team to overcome the obstacles we would face.", "answer_id": "8chhW8gPzM6zHUN6VAqpym", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 30, "text": "\"I am John, and I am one of the few survivors in this post-apocalyptic world. It's been years since the disaster struck, and most of civilization has been destroyed. I was lucky enough to find shelter in an abandoned fallout shelter, and that's how I've been able to survive this long.\n\nI scavenge for food and supplies, venturing out only when absolutely necessary. The world outside is harsh and full of dangers, from raiders looking for loot to harsh radiation levels. But I've managed to find some allies along the way, people who have also managed to survive.\n\nThere's Maria, who lives in a nearby settlement and helps me trade goods. And then there's Jack, who is a skilled hunter and helps me find food. Together, we've been able to form a tight-knit community, and our survival is dependent on each other.\n\nWe've also come across other groups of survivors, some of whom are friendly and some of whom are not. It's a tough world out there, and trust is hard to come by. But we've managed to band together and form a makeshift society, where we can help each other survive and rebuild.\n\nI don't know what the future holds, but I'm grateful for the allies I've encountered in this post-apocalyptic world. Together, we're working towards a better tomorrow.\"", "answer_id": "Pxj7ZVsvQ9HMQnRVMh7os4", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 31, "text": "To determine if a restaurant is popular among locals or mainly attracts tourists, you can look for a few key indicators:\n1. parking lot or street parking: if there is a lot of parking right out front, it's likely locals are eating there, while tourists are more likely to drive and park in designated lots or garages.\n2. reservation policy: if the restaurant only takes reservations for parties of 4 or more, it's probably popular with locals who are used to eating there with a group. Tourists, on the other hand, may be more likely to dine solo or with one other person.\n3. menu prices: if the menu features high-end dishes and price points, it's more likely to attract tourists who are used to paying those kinds of prices at home.\n4. local crowd: look at the demographic of the customers eating there. If it's predominantly made up of local retirees, families, or businesspeople, it's likely a local spot.\n5. online reviews: read online reviews to see if locals are mentioning the restaurant in passing or if tourists are raving about it.\nKnowing whether a restaurant is popular among locals or tourists can be useful for several reasons:\n1. Ambiance: Local hangouts tend to have a more relaxed atmosphere, while tourist hotspots may be more bustling and noisy.\n2. Prices: Local joints are often less expensive, while tourist spots may have higher prices.\n3. Recommendations: If you're a local and you're looking for a place that's popular with other locals, you'll want to seek out spots that are frequented by people who live in the area.\n4. Authenticity: Local spots are more likely to serve authentic regional cuisine, while touristy places may cater more to the tastes of out-of-towners.\n5. Crowds: If you're looking to avoid crowds, you may want to steer clear of tourist hotspots.", "answer_id": "NCUhSMqzESRJJVDKHQ7XPB", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 32, "text": "Here are some subtle clues that someone might be pretending to understand a topic or conversation:\n1. Repeating the same points or phrases without adding new information or insights.\n2. Asking simplistic or naive questions that show a lack of understanding of the topic.\n3. Using jargon or technical terms incorrectly or out of context.\n4. Making sweeping generalizations or making claims that are not supported by evidence.\n5. Interrupting or speaking over others, particularly when they are trying to provide more information or clarification.\n6. Using nonverbal cues such as smirking or making dismissive facial expressions while others are speaking.\n7. Avoiding eye contact or fidgeting nervously.\n8. Repeatedly changing the subject or redirecting the conversation away from the topic at hand.\n9. Using deflection or evasion tactics to avoid answering questions or providing straight answers.\n1voir 10. Googling the topic while pretending to know what's going on.\nIt's important to note that not everyone who exhibits these behaviors is necessarily pretending to understand. They might be genuinely confused or unsure, or they might be intimidated or shy about admitting their confusion. However, these clues can be red flags that someone is not truly engaging with the conversation or topic at hand.", "answer_id": "mEvCEzVGnENJAMzs2ioTmp", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 33, "text": "There are several reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app:\n1. Reliability: GPS devices and smartphone apps can sometimes be unreliable, especially in areas with poor reception or limited cell service. Paper maps are less likely to have signal issues or other technical problems.\n2. Battery life: GPS devices and smartphones can drain batteries quickly, especially when in use for extended periods of time. Paper maps are battery-free and can be used without worrying about running out of power.\n3. Availability: Paper maps are universally available, while GPS devices and smartphone apps may not work properly or be available in certain areas.\n4. Perspective: GPS devices and smartphone apps can lock users into a single perspective or route, while paper maps offer more flexibility and allow users to see surrounding areas and make adjustments as needed.\n5. Personal preference: Some people simply prefer paper maps to GPS devices or smartphone apps, either because they are more familiar with them or because they find them more user-friendly.\n6. Privacy: GPS devices and smartphone apps can track users' movements and whereabouts, while paper maps do not have this capability.", "answer_id": "CoFnjLgW5Eens9JhnpiJHZ", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 34, "text": "There are several ways to determine if a person is genuinely interested in a conversation or simply being polite:\n\n1. Pay attention to their nonverbal cues: If someone is genuinely interested in a conversation, they will likely make eye contact, lean towards you, and express positive body language. On the other hand, if they are being polite, they may not show as much nonverbal engagement.\n2. Listen to their responses: If someone is genuinely interested in a conversation, they will likely ask follow-up questions and show an interest in what you are saying. If they are being polite, their responses may be more general and less engaged.\n3. Observe their level of participation: If someone is genuinely interested in a conversation, they will likely be more active in the conversation, contributing their own thoughts and ideas. If they are being polite, they may be less likely to participate.\n4. Pay attention to their tone of voice: If someone is genuinely interested in a conversation, their tone of voice will likely be more engaged and interested-sounding. If they are being polite, their tone may be more neutral or even bored.\n5. Ask them directly: If you are unsure whether someone is genuinely interested in a conversation or simply being polite, you can ask them directly. For example, you could say something like \"I'm not sure if you're really interested in this conversation or just being polite. Can you tell me your honest thoughts?\"\n\nKeep in mind that it can be difficult to always tell if someone is genuinely interested in a conversation or simply being polite, and it's always a good idea to be respectful and considerate in your interactions with others, regardless of their level of engagement.", "answer_id": "aQ9mM2wfmHSge2KvuQ8hmx", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 35, "text": "There are several reasons why someone might prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher:\n1. Supporting the local community: Shopping at a small, locally-owned business directly supports the local economy and the people who live there.\n2. Unique products: Small, locally-owned businesses often offer unique and specialized products that are not available at large chain stores.\n3. Personalized service: Small businesses are often staffed by knowledgeable and friendly employees who are more likely to provide personalized service and attention to their customers.\n4. Sustainability: Small businesses are often more invested in sustainable practices and using environmentally-friendly products, which can be important to some shoppers.\n5. Customer loyalty: Shopping at a small business can foster a sense of loyalty and connection to the community, which can be valued above the cheaper prices offered by large chains.\n6. Preservation of heritage: Small businesses often play a role in preserving local heritage and culture, and their closure can lead to the loss of local identity and character.\nOverall, while price may be a factor for some shoppers, many people are willing to pay a little more to support their local community, receive personalized service, and access unique products.", "answer_id": "eM5S86H3bXTGLQcVW59XsD", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 36, "text": "Assessing the credibility of a source of information is important to ensure that you are basing your beliefs and decisions on reliable and accurate information. Here are some tips on how to assess the credibility of a source without relying solely on the reputation of the author or publisher:\n1. Check the date of the information: Information that is outdated may not be credible, as it may no longer be relevant or accurate.\n2. Look for sources: Look for other sources that support the information in the article or post. If the information is not supported by other sources, it may be less credible.\n3. Evaluate the language: Look for clear, concise, and unbiased language. If the language is sensationalized or emotionally charged, it may be less credible.\n4. Check the author's credentials: Look for authors who are experts in their field and have a track record of reliable information.\n5. Assess the source's bias: Look for sources that are impartial and free from bias. Sources with a clear bias may not provide a balanced view of the issue.\n6. Check the URL: Be wary of sources with URLs that end in \".com\" or other non-reputable extensions. Reputable sources typically use .edu or .gov extensions.\n7. Look for peer review: Look for sources that have undergone peer review, which is a process where experts in the field review and critique the information.\n8. Be critical: Be skeptical of information that seems too good (or too bad) to be true, and be critical of any sources that do not stand up to scrutiny.\nBy following these tips, you can critically evaluate the credibility of a source of information and make informed decisions about the information you choose to believe and act on.", "answer_id": "MpBrYa9J2zQy9NGi2dvKp8", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 37, "text": "Some people may enjoy the sensation of being scared because it can be a thrilling and exciting experience. It can also be a way to experience a rush of adrenaline and a feeling of being alive. Additionally, some people may find that being scared can be a way to cope with their own fears and anxieties.\n\nOn the other hand, some people may avoid experiences that scare them because they find them unpleasant or stressful. For these individuals, being scared may activate their fight or flight response, releasing stress hormones such as cortisol and adrenaline that can cause physical and psychological distress. Additionally, some people may have a phobia of certain things that scare them, which can cause them to experience intense fear and anxiety.\n\nIt is also worth noting that some people may be more naturally predisposed to enjoying scary experiences, while others may not. This can be due to a variety of factors, including genetics, personality traits, and life experiences.", "answer_id": "i8QZzVJo2WkTLc7WMv4bNm", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 38, "text": "Observing the behavior of other people in a social situation can provide clues about cultural norms and expectations because culture is communicated and learned through social interactions. People within a culture tend to share common behaviors, norms, and expectations that are learned and passed down through socialization.\nWhen observing social behavior, here are some clues about cultural norms and expectations:\n1. Nonverbal communication: Culture influences nonverbal communication, such as eye contact, gestures, and body language. Differences in nonverbal communication can signal differences in cultural norms and expectations.\n2. Social distance: Culture influences social distance, or how personal space is used and respected. Differences in social distance can signal differences in cultural norms and expectations.\n3. Greetings: Culture influences how greetings are exchanged, such as shaking hands, bowing, or kissing. Greetings can provide clues about cultural norms and expectations.\n4. Interaction patterns: Culture influences how people interact with each other, such as individualistic or collectivistic cultures. Interaction patterns can provide clues about cultural norms and expectations.\n5. Values and beliefs: Culture influences values and beliefs, which can be reflected in social behavior. For example, if a culture values punctuality, people within that culture may be more likely to arrive on time.\nBy observing social behavior, we can gain clues about cultural norms and expectations, and in turn, learn more about the culture and its values, beliefs, and customs.", "answer_id": "HP2CYZ3HJWMcGp6QF9qbq6", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 39, "text": "There is no one answer to this question, as it is a matter of personal opinion and priorities. Some people believe that exploring space is a morally praise-worthy goal because it can lead to scientific discoveries, technological advancements, and increased understanding of our universe. Others believe that we have a moral obligation to focus on solving Earth's problems first, as we have the ability and resources to make a positive impact on our own planet and its inhabitants.\nBoth goals are important and can be pursued simultaneously to some extent. Space exploration can bring benefits to the humanity like scientific knowledge, technological advancements, and possibility to find new resources. At the same time, Solving Earth's problems like climate change, poverty, and inequality are also very important and should be given high priority. It's a question of balancing priorities and resources.", "answer_id": "XrMYXEWHff6jXjjJWquXfW", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 40, "text": "In a world where automation is becoming increasingly prevalent, it is important to strike a balance between prioritizing job creation and technological progress. Both job creation and technological progress are important for a thriving society, but they must be balanced to ensure that the benefits of automation are shared equitably.\n\nJob creation: Jobs are essential for individuals to earn a living and support themselves and their families. In a world where automation is displacing jobs, it is important to prioritize job creation so that individuals can continue to support themselves and their families. Job creation can be achieved through policies that encourage businesses to invest in new technologies and hire more workers, as well as through training programs that equip workers with the skills they need to thrive in an automated economy.\n\nTechnological progress: Technological progress is essential for driving innovation and improving productivity, which can lead to higher living standards and economic growth. Automation can lead to increased productivity, which can free up workers to focus on more high-skilled tasks and lead to more efficient and effective production processes. However, it is important to ensure that technological progress is not prioritized at the expense of job creation and social equity.\n\n Balance: In order to strike a balance between job creation and technological progress, it is important to prioritize policies that encourage job creation while also investing in training programs that equip workers with the skills they need to thrive in an automated economy. Additionally, it is important to ensure that the benefits of technological progress are shared equitably so that all members of society can benefit from automation.", "answer_id": "eeS6qNLeX6AScyGrS5pwrc", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 41, "text": "The average human blinks anywhere from 12 to 20 times per minute, which equates to around 2 million blinks per year. The exact number of blinks per lifetime is difficult to determine, as it depends on factors such as age, health, and lifestyle.\nThe frequency of blinking is regulated by the nervous system and is involuntary, meaning that we don't consciously control when we blink. Blinking helps to lubricate the eyes and keep them moisturized, and it also helps to protect the eyes from foreign substances and other potential hazards.\nThe number of blinks per day and per lifetime is influenced by several factors. For example, infants blink less than adults because their eyelids are not yet fully developed. As we age, our blinking frequency tends to decrease slightly. Additionally, factors such as computer use or other activities that require prolonged screen viewing can lead to dry eye and increased blinking frequency.\nIn short, the number of blinks per lifetime varies based on factors such as age, health, and lifestyle, and it's difficult to determine an exact number. However, blinking is an important process that helps to keep our eyes healthy and protected.", "answer_id": "an76qPuSJ4TQDWvNXdcJ2w", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 42, "text": "The number of atoms in a grain of salt can vary depending on the size of the grain, but let's assume a typical grain of salt is about 600 micrometers ( micrometers) in diameter, which is a reasonable estimate.\n\n1. First, we need to convert the grain's mass to atoms. the mass of a grain of salt can range from about 100 milligrams (mg) to 300 mg, so let's say it is 200 mg, which is a common mass for a grain of salt.\n2. Next, we need to know the atomic weight of the elements that make up the grain of salt. Table salt, which is NaCl (sodium chloride), has a molar mass of 58.44 g/mol. Since 1 mole is equal to 6.022 x 10^23 atoms or molecules, we can divide the molar mass by Avogadro's number to get the number of atoms in 200 mg of NaCl:\n6.022 x 10^23 atoms/mol / 58.44 g/mol = 1.039 x 10^23 atoms/mg\n3. Now we can multiply the number of moles in 200 mg of NaCl by the number of atoms per mole:\n200 mg x 1039 atoms/mg x 1 mol/ 200 mg = 1.039 x 10^22 atoms\n4. Finally, we can convert this number to micrometers:\n1.039 x 10^22 atoms x ( 1 micrometer)/( 10^-6 m) = 1.039 x 10^20 atoms\n5. So, the number of atoms in a grain of salt (assuming it is 600 micrometers in diameter) is approximately 1.039 x 10^20 atoms.\n\nPlease note that this is just an estimate, and the actual number of atoms in a grain of salt can vary depending on its purity and other factors.", "answer_id": "DYnmt5xTTTkLuyyBxhmDsq", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 43, "text": "The number of lightning strikes that occur on Earth each day is difficult to estimate precisely, as it can vary widely based on location and weather conditions. However, we can make some educated guesses based on average values and known factors that influence lightning activity.\nFirst, let's consider that lightning is a form of electrical discharge that occurs when there is a difference in electrical potential between two points. This most commonly happens during thunderstorms, when the charged water droplets in clouds rub together and create an electrical imbalance.\nNext, we can estimate that on average, a thunderstorm produces about 100 lightning strikes per square mile per year. This number can vary based on the frequency and intensity of thunderstorms in a given region.\nNow, let's consider the Earth's surface area. The planet Earth has a total area of about 197 million square miles. If we multiply this by the number of lightning strikes per square mile per year, we get an estimate of about 19.7 trillion lightning strikes per year.\nHowever, this is still an estimate, as it doesn't take into account the fact that not all lightning strikes are necessarily visible, especially at night or in areas with low cloud cover.\nFurthermore, some regions such as Equatorial areas, experience high lightning activity throughout the year, while in other regions like Polar areas, lightning activity is low.\nFinally, it's important to note that lightning activity can also be influenced by factors such as global climate change, which could affect the frequency and intensity of thunderstorms in different regions.\nSo, while we can make some educated guesses about how many lightning strikes occur on Earth each day, the actual number is likely to be complex and variable, and", "answer_id": "MbcVvnW3Eh9wZi34SzYawn", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 44, "text": "It would take a very large number of balloons to lift a house like in the movie \"Up.\" The movie depicts a house being lifted into the air using a large number of helium balloons, and the exact number of balloons required is never specified. However, we can make an estimate based on some real-world principles and calculations.\n1. Buoyancy: Helium balloons float because helium is less dense than air. As a result, the upward force exerted on the balloons by the surrounding air is greater than the weight of the balloons themselves, causing them to float. In order for a house to float, the upward force exerted on the balloons would need to be greater than the weight of the house.\n2. Volume: The volume of helium required to lift a house would be much larger than the volume of the balloons themselves. Helium is less dense than air, so in order to lift a house, you would need a large volume of helium to displace the air beneath the house and provide enough buoyancy.\n3. Weight of the house: A typical house weighs anywhere from 50 to 100 tons or more, depending on the size and materials used. To lift such a weight, you would need a lot of helium, which would require a lot of balloons.\n4. Balloons: The average helium balloon has a volume of around 1 cubic meter. Using this as a rough estimate, we can calculate the number of balloons needed to lift a house using the following formula:\nNumber of balloons = Volume of house / Volume of balloon\nLet's assume a conservative estimate of 100 tons for the weight of the house and 1 cubic meter for the volume of each balloon. To lift the house, you would need:\nNumber of balloons = 100 tons / 1 cubic meter per balloon = 100,000,000 balloons\nThis is a very rough estimate and does not take into account other factors, such as the shape of the house, the materials it is made of, and the specifications of the balloons used. In reality, it would be practically impossible to use so many balloons to lift a house, as it would require an unimaginable amount of helium and a herculean effort to inflate and tie them all off.\nIn conclusion, it would take an insanely large number of balloons to lift a house like in the movie \"Up,\" and it is not physically possible to do so in reality.", "answer_id": "4oSKRo3DtcNtE8gRbrSJPg", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 45, "text": "The number of text messages sent globally in a minute fluctuates based on various factors such as time of day, day of the week, and geographic location. However, as of my knowledge cutoff date in September 2019, the estimated number of text messages sent per minute globally was around 16 million.\nHere's my reasoning:\n1. First, let's consider the number of text messages sent per day. As of my knowledge cutoff date, the estimated number of text messages sent per day was around 19 billion.\n2. Next, let's divide that number by the number of minutes in a day. There are 1440 minutes in a day, so the number of text messages per day divided by the number of minutes per day gives us an estimate of around 13.2 text messages per minute.\n3. However, this number is an average and fluctuates based on various factors such as time of day and day of the week. For instance, the number of text messages sent per minute is likely to be higher during peak hours in regions with high mobile usage.\n4. To estimate the number of text messages sent per minute globally, we can use this 13.2 text messages per minute as a starting point and adjust based on the distribution of mobile usage across different regions.\n5. According to data from the International Telecommunications Union (ITU), the number of mobile cellular subscriptions worldwide was around 5.3 billion as of 2018. This means that there are many people who have multiple SIM cards or use multiple devices, so the actual number of text messages sent per minute may be higher than this estimate.\n6. Combining these factors, we can estimate that around 16 million text messages are sent globally per minute.\nIt's worth noting that this is just an estimate, and the actual number of text messages sent per minute may be higher or lower depending on various factors such as technological advancements, changes in mobile usage patterns, and other factors that impact mobile communication trends.", "answer_id": "bhh9SFsYgquUPeC2qBKMJw", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 46, "text": "It is difficult to estimate the exact number of words spoken daily on Earth, as it is a constantly changing and fluctuating number. However, there are some factors that can be taken into account to give a rough estimate:\n1. Languages: There are over 7,000 languages spoken around the world, and many of them are spoken daily by a large number of people.\n2. Speech per person: On average, a person speaks between 100-200 words per minute, and an adult spends about 6-8 hours sleeping, leaving 16-18 hours awake and speaking.\n3. World population: As of 2021, the world population is estimated to be around 7.9 billion people.\nCombining these factors, we can estimate that the daily spoken word count is likely in the trillions. However, it is important to note that this number is a rough estimate and can vary significantly depending on the method of calculation and other factors.\nIt is also worth noting that this estimate does not take into account written communication, such as text messages, emails, and social media posts, which would likely add significantly to the daily word count.", "answer_id": "oKMFEsBBp4SEJiVPRQZN2N", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 47, "text": "It is difficult to give a specific number of snowflakes that fall during a typical winter, as the number can vary greatly depending on factors such as snowfall rate, temperature, humidity, and wind speed. Additionally, individual snowflakes are microscopic and too small to be accurvoir counted accurately.\nHowever, we can give an estimate based on some averages and assumptions.\nThe average snowflake forms when water vapor in the atmosphere condenses around a particle such as dust, soot, or pollen, and freezes into ice crystals. These ice crystals then bond together to form a snowflake.\nSnowfall rate is the number of snowflakes that fall per square meter per unit of time. The average snowfall rate is around 2-5 cm per hour, which means that during a typical winter day, there could be around 2-5 million snowflakes falling per square meter.\nIf we assume that each snowflake is unique and has not been counted before, then during a typical winter day, there could be around 2-5 billion snowflakes falling per square meter globally.\nHowever, this number is based on several assumptions and averages, and actual numbers can vary greatly depending on location, time of day, and other factors.\nFurthermore, this number is too small to be counted accurately, and the number of snowflakes falling during a winter season is better described in terms of snowfall totals, which can range from just a few inches to several feet depending on location and season.", "answer_id": "XXXVQo5w5XCFiEuK6n3hUV", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 48, "text": "It is difficult to give an exact number of pages in all the books ever written because new books are still being published, and there are millions of books in existence. However, based on the number of books published each year and the average page count of a book, we can estimate that there are currently around 130 million books in existence.\nTo break down this estimate, let's assume that the average book has 200 pages. This is a rough estimate, but it works as a starting point. We can then multiple this number by the number of years that have passed since the invention of writing, which is around 5000 years. This gives us an estimate of 1,000,000,000 pages (or 1 billion pages).\nHowever, this estimate does not take into account the books that were written but have since been lost or destroyed, which were likely much higher in number than the books that still exist today. Additionally, it does not account for the books that will be written in the future.\nTherefore, it is impossible to give an exact number of pages in all the books ever written. The number is constantly changing, and the estimate will always be a rough approximation.", "answer_id": "Ysbx8mFxYva6TbHQGKL8K7", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 49, "text": "It is difficult to estimate the exact number of times the Earth has orbited the Sun since the beginning of life, as the exact timing of the start of life on Earth is not known. However, we can make some educated estimates based on certain astronomical events and geological markers.\n\n1. The Earth-Sun orbit: The Earth orbits the Sun once every 365.24 days, which equates to about 93 million miles ( 149.6 million kilometers).\n2. Estimating the age of life on Earth: Scientists estimate that life on Earth began about 3.5 billion years ago.\n3. Asteroid impacts and other geological markers: Asteroid impacts have been a major driver of geological change on Earth throughout its history. The impacts have led to extinctions, changes in climate, and other dramatic shifts in the planet's ecosystems. Asteroids have also been the source of many of the planet's resources, including oil, gas, and coal.\n4. stepping-stone model: The \"stepping-stone model\" of early life suggests that life began in a series of isolated, oxygen-free locations, such as volcanic vents, and then gradually moved to other locations over time.\n5. Mathematical model: We can use a mathematical model to estimate the number of Earth-Sun orbits since life began. We can start with the estimated age of life on Earth (3.5 billion years) and divide it by the length of a Earth-Sun orbit (365.24 days) to get the number of orbits per year: 3.5 billion / 365.24 = 9.85 billion orbits.\n6. Rounding: Round this number to 10 billion orbits.\n\nSo, based on these estimates, it would be around 10 billion Earth-Sun orbits since the beginning of life on Earth. It's important to note that this is an estimate, and the actual number could be different due to the uncertainties in the timing of the start of life and the various astronomical and geological events that have occurred over Earth's history.", "answer_id": "KenbYuKVVUVXe2stVtxkeF", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 50, "text": "It is difficult to estimate the exact number of songs that have been recorded throughout history, as it is a very broad and dynamic category that includes music from many different cultures and time periods. However, we can make some educated guesses based on a few factors.\nFirst, let's consider the number of songs that have been created. This number is likely to be very high, as music has been an important part of human culture for thousands of years, and virtually every culture has its own music traditions. In addition, with the rise of popular music in the 20th century, the number of songs created each year has increased exponentially.\nSecond, let's consider the number of these songs that have been recorded. Recording technology has been around for less than 150 years, so songs recorded prior to this time would not exist in a recorded form. However, recording technology has advanced rapidly in recent decades, making it easier and cheaper to record music. This means that the number of recorded songs has also increased exponentially.\nGiven these factors, it is safe to say that there are likely trillions of songs that have been created throughout history, and potentially billions of songs that have been recorded. However, these numbers are estimates and are difficult to pin down precisely.\nUltimately, the number of songs recorded throughout history is not as important as the impact that music has had on human culture and society. Music has been used for celebrations, rituals, healing, and communication, and has played a crucial role in shaping the identities and experiences of people around the world.", "answer_id": "ceWTK9jYWZq2Dd2H7KGkWY", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 51, "text": "It is difficult to say how exactly the Internet would have been invented during the Renaissance period, as the technological and social conditions were quite different back then. However, we can speculate on some possible outcomes.\n\nFirstly, the Internet would likely have been a more localized technology, with people communicating with each other through a series of interconnected systems that were physically located in the same town or city. This could have lead to more face-to-face interactions and communities that were more tightly knit together.\n\nSecondly, the dissemination of information would have been much slower, as the speed of communication would have been limited by the speed of messengers or the delivery of physical documents. This could have lead to a different kind of information ecosystem, where people relied more on oral tradition and written documents, and less on real-time updates and instantaneous communication.\n\nThirdly, the Renaissance period was characterized by a great flourishing of art, literature, and science, and the Internet would have likely been shaped by these same cultural currents. This could have lead to a more humanistic approach to communication, where the exchange of ideas was central, and technology was used to facilitate the sharing of knowledge and creativity.\n\nOverall, it is impossible to say exactly how the Internet would have been invented during the Renaissance period, but it is likely that it would have been quite different from the global network we have today.", "answer_id": "6Bbzs6YWyzPj52rZAfRPTt", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 52, "text": "The Aztecs were a Mesoamerican civilization that flourished in the 14th century CE, and their capital city Tenochtitlan is believed to have been one of the most populous cities in the world at that time. In 1519 CE, the Spanish conquistador Hern\u00e1n Cort\u00e9s arrived in Mexico, and his forces eventually captured Tenochtitlan in 1521 CE, marking the fall of the Aztec empire.\n\nIf the Aztecs had successfully repelled the Spanish conquistadors, it is difficult to say exactly how history would have unfolded. However, here are a few possible scenarios:\n\n1. The Aztecs may have continued to thrive as a civilization, perhaps even expanding their empire further.\n2. The Aztecs may have made contact with other civilizations, such as those in South America, and may have had a profound impact on the course of history in the Americas.\n3. The Aztecs may have been able to resist the spread of diseases, such as smallpox, which decimated their population following contact with the Spanish.\n4. The Aztecs may have been able to maintain their traditional beliefs and way of life, without the same level of influence from European powers.\n5. The Aztec empire may have been able to maintain its status as a major player in world affairs, and may have had a lasting impact on the course of history.\n\nIt is important to note that any alternate history is speculative, and we can only imagine how events may have played out differently.", "answer_id": "EG6h5jgpQ8wesKAX43nt9X", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 53, "text": "The Black Death, also known as the Plague, was a pandemic that devastated Europe in the 14th century, killing an estimated 25-30% of the population. If the Black Death had not occurred, it is difficult to predict exactly how history would have unfolded, but here are a few potential consequences:\n\n1. Demographic: The Black Death had a profound impact on demographics, leading to a reduction in population size. Without the Black Death, Europe's population may have continued to grow, leading to a different balance between rural and urban areas, and potentially leading to more technological and social advancements.\n2. Economic: The Black Death led to a major disruption in trade and commerce, as well as a decline in labor supply. Without the Black Death, the economy may have continued to grow and evolve, leading to different political and social structures.\n3. Political: The Black Death led to a redistribution of power away from feudal lords and towards urban centers. Without the Black Death, feudalism may have continued to dominate European society, leading to different political and social structures.\n4. Social: The Black Death led to a reevaluation of traditional social structures and beliefs. without the Black Death, society may have continued to evolve along traditional lines, leading to different cultural and religious practices.\n5. Medical: The Black Death led to a major advance in medical knowledge and practices, as people sought to understand and treat the disease. Without the Black Death, medical knowledge and practices may have continued to lag behind, leading to different health outcomes.\n\nOverall, it is difficult to predict exactly how history would have unfolded if the Black Death had not occurred. However, it is clear that the Black Death had a profound impact on European society and culture, shaping the course of history in ways that still reverberate today.", "answer_id": "J76ZgE27N2Pe77NcEBrWUV", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 54, "text": "Isaac Newton is best known for his groundbreaking work in physics, particularly his laws of motion and universal gravitation. However, he also made important contributions to other fields, including mathematics, optics, and 17th century religious controversies. Had Newton focused on biology instead of physics, it is difficult to say exactly what he would have accomplished in that field, but some possibilities include:\n1. Plant biology: Newton was interested in botany and conducted experiments on plant growth. He might have made more significant contributions to our understanding of plant biology, perhaps even developing theories on photosynthesis or plant growth.\n2. Animal behavior: Newton was also interested in animal behavior and wrote about his observations on the behavior of cats, dogs, and other animals. He might have made more significant contributions to our understanding of animal behavior had he focused on biology.\n3. Evolution: Newton died more than a century before Charles Darwin published his theory of evolution, so he wouldn't have made direct contributions to that theory. However, had he focused on biology, he might have made important observations that could have paved the way for Darwin's work.\n4. Medicine: Newton was fascinated by the human body and its biomechanics. He might have made more significant contributions to medicine had he focused on biology, perhaps developing theories on human physiology or disease transmission.\n\nWhile we can only speculate on what Newton might have accomplished had he focused on biology instead of physics, his contributions to science as a whole would still have been significant.", "answer_id": "hoFFABFFF7J8juv3AnoVWZ", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 55, "text": "The Beatles were one of the most influential and successful bands in music history, so it's difficult to imagine what the music industry would have looked like if they had never existed. However, here are a few possible scenarios:\n1. The British Invasion may have still happened, but it could have been spearheaded by another band, such as The Rolling Stones or The Who.\n2. The 1960s counterculture movement may have been shaped differently without the Beatles' music as a soundtrack.\n3. The way music was recorded and produced could have been different, as the Beatles were known for their innovative use of studio technology.\n4. The popularity of rock music may have been slowed or even derailed without the Beatles' influential music.\n5. Other bands and artists may have been influenced differently, leading to changes in musical genres and styles.\n6. The Beatles' breakup in 1970 could have had a major impact on the music industry, as they were one of the highest-selling bands in history at the time.\n7. The Beatles' solo careers may have been different without the success and influence of the band, and individual members may have pursued different projects.\nOverall, it's difficult to say exactly how the music industry and popular culture would have been affected without the Beatles, but their impact was undeniable and they are still loved and celebrated by fans around the world.", "answer_id": "8R5n2uZ4oq52QkWQsSS6No", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 56, "text": "If Alan Turing had not cracked the Enigma code during World War II, it is possible that the outcome of the war could have been different. The Enigma code was used by the Germans to encrypt their most sensitive military communications, and Turing's work at Bletchley Park in Britain allowed Allied codebreakers to decrypt German messages and gain valuable strategic insights.\nWithout Turing's codebreaking work, the Allies may have struggled to keep pace with German military movements and plans, which could have led to different outcomes in key battles and campaigns. This could have weakened the Allies' ability to respond to German aggression, and the war could have dragged on longer or taken a different trajectory.\nHowever, it's also important to note that codebreaking was just one aspect of the Allies' efforts during World War II, and other factors such as the production of tanks, planes, and other military hardware, as well as Allied strategic decisions, also played critical roles in the outcome of the war.", "answer_id": "ECtu3QVXVrNhssSyktKzkq", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 57, "text": "The Suez Canal is a man-made waterway that connects the Mediterranean Sea with the Red Sea, allowing ships to travel between Europe and Asia without having to go all the way around Africa. If the Suez Canal had never been constructed, ships would have had to sail around the Cape of Good Hope at the southern tip of Africa, which would have added many days and potentially weeks to the journey between Europe and Asia.\nThis would have had significant consequences for international trade, as goods would have taken longer to reach their destinations and would have been more expensive due to the increased fuel costs. It would also have had an impact on the global economy, as the ease of trade between Europe and Asia would have been severely limited.\nAdditionally, the Suez Canal has played an important role in military operations in the region, with several conflicts being fought near or on the canal. If the Suez Canal had never been built, the political landscape of the Middle East could have been very different, and the balance of power between different countries in the region could have been altered.\nOverall, the Suez Canal has had a profound impact on the world, both in terms of trade and geopolitics. Its construction has made trade between Europe and Asia much easier and cheaper, and it remains an important strategic site to this day.", "answer_id": "jb8ot3ucdF3RvzApEYKft6", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 58, "text": "It is difficult to say with certainty how the world would be different if the Maya civilization had never mysteriously collapsed, as there were many factors that shaped the course of history. However, here are a few possible scenarios:\n1. The Maya may have continued to thrive: The Maya civilization was known for its advanced astronomy, mathematics, and calendar systems. They also had a sophisticated agricultural system that allowed them to flourish in a harsh environment. If the civilization had not collapsed, it is possible that the Maya would have continued to develop these and other technologies, leading to further advancements and cultural flourishing.\n2. The region may have been more politically stable: The collapse of the Maya civilization led to a period of political instability in the region. If the Maya had not collapsed, it is possible that their political system could have been more resilient and could have helped to maintain political stability in the region.\n3. The Maya may have made contact with other civilizations: The Maya had contact with other civilizations, such as the Aztecs and the Incas, but their collapse may have limited further contact and exchange of ideas. If the Maya civilization had not collapsed, they may have been able to make more contact with other civilizations and potentially influence their development.\n4. The world may have been different culturally: The Maya civilization was known for its rich cultural traditions, including its art, architecture, and religious practices. If the civilization had not collapsed, it is possible that these cultural traditions would have continued to evolve and influence other civilizations in the region.\n5. The world may have been different environmentally: The Maya civilization was known for its agricultural practices, which included farming and land management techniques that allowed them to thrive in a harsh environment. If the civilization had not collapsed, it is possible that their agricultural practices could have influenced the way that other civilizations managed their resources and impacted the environment differently.", "answer_id": "cAjR8aDWRKVKbxKeqRRgvW", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 59, "text": "If Christopher Columbus had not discovered the Americas, the indigenous peoples of the Americas would have continued to live and thrive in their own cultures and societies without significant outside contact. The impact of Columbus's arrival would have been avoided, and the Americas would have developed differently without European colonization.\nThe Americas before Columbus were home to many different civilizations and cultures, each with their own unique customs, languages, and way of life. These civilizations had developed independently of each other for thousands of years and had a rich and diverse history. Without Columbus's discovery, these civilizations would have continued to flourish on their own, without outside influence.\nThe impact of Columbus's arrival had far-reaching consequences, including the introduction of diseases like smallpox, which decimated native populations, and the enslavement and forced labor of indigenous peoples. The colonization of the Americas also led to the displacement and genocide of indigenous peoples, and the theft of their lands and resources.\nWithout Columbus's arrival, it is possible that indigenous peoples would have continued to thrive and develop their own cultures and societies, and the history of the Americas would have been very different.", "answer_id": "7hKzFRQWWPVbr8H4wcbGdA", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 60, "text": "It is difficult to say exactly what would have happened if Vincent van Gogh had been a successful artist during his lifetime, as there are many factors that could have influenced his career and life. However, some possible outcomes include:\n1. Van Gogh may have been able to sell more paintings and achieve financial stability while he was still alive. This could have relieved some of the financial burden he faced during his lifetime, and may have allowed him to pursue his art more freely.\n2. Van Gogh may have been more widely known and recognized for his artistic talent during his lifetime, which could have led to more recognition and critical acclaim. This could have also influenced the direction of his artistic career, as he may have been more confident in his style and subjects.\n3. Van Gogh may have been able to connect with other artists and movements more easily, as he may have been more financially and professionally secure. This could have led to more collaboration and cross-pollination of ideas, which could have influenced the direction of his art.\n4. Van Gogh's life may have been different in other ways as well, such as where he lived, who he associated with, and how he was remembered after his death. Ultimately, it is impossible to know exactly how van Gogh's life would have been different if he had been a successful artist during his lifetime, but it is clear that his art and legacy have had a profound impact on the world.", "answer_id": "3vMtYzSrV5ARpYA9fGUe4H", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 61, "text": "Here is a simple C++ program that reads a text file line by line and counts the number of occurrences of a specific word in the file:\n\n1. #include \n2. #include \n3. #include \n4. using namespace std;\n5.int main()\n6. {\n7.FILE *fp;\n8. char line[100];\n9. int count = 0;\n11. fp = fopen(\"filename.txt\", \"r\");\n12. if (fp == nullptr)\n13. {\n14. cerr << \"Error opening file\" << endl;\n15. return 1;\n16. }\n17. while (fgets(line, 100, fp))\n18. {\n19. count += 1;\n20. }\n21. fclose(fp);\n22. return 0;\n23. }\n\nIn this program, we first include the necessary header files, such asiostream, fstream, and string. In line 5, we declare a variable named count to keep track of the number of occurrences of the specific word in the file. In lines 11-12, we open the file using fopen() function and check for errors using if-else statement. In lines 13-16, we use the while loop to read the lines of the file using fgets() function. Each line read is stored in the char array named line. In line 19, we simply add 1 to the count variable each time a new line is read. Finally, in lines 21-23, we close the file using fclose() function and return 0 to indicate successful program execution.", "answer_id": "nvbTnFLEyPSSgPdf2n6Mqp", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 62, "text": "Here's a Python implementation of the dynamic programming approach to find the longest common subsequence (LCS) of two input strings:\n\n```python\ndef lcs( str1, str2 ):\n m, n = len( str1 ), len( str2 )\n dp = [[1] * (n + 1) for _ in range( m + 1)]\n for i in range( m + 1 ):\n dp[i][0] = 1\n for j in range( n + 1 ):\n dp[0][j] = 1\n for i in range( 1, m + 1 ):\n for j in range( 1, n + 1 ):\n if str1[i - 1] != str2[j - 1]:\n dp[i][j] = dp[i - 1][j] + dp[i][j - 1]\n else:\n dp[i][j] = dp[i - 1][j]\n return dp[m][n]\n\n# Example usage\n str1 = \"abc\"\n str2 = \"defg\"\n print( lcs( str1, str2 ) ) # 3\n```\n\nThe `lcs` function takes two strings `str1` and `str2` as input and returns the length of the longest common subsequence between them. It uses dynamic programming to build a 2D list `dp` of size `(m+1) x (n+1)`, where `m` and `n` are the lengths of `str1` and `str2`, respectively. The first row and column of `dp` are initialized to `1`, as the LCS between the empty substrings `str1` and `str2` is just `1`. Then, for each non-empty substring `str1[i-1]` and `str2[j-1]`, the function calculates the LCS between them by using the `dp` matrix, which involves taking the minimum of the LCS between `str1[i-1]` and `str2[j-1]` and the LCS between `str1[i]` and `str2[j-1]`. The final length of the LCS is stored in `dp[m][n]`.", "answer_id": "RS2roGuiWf5jZfncgfHPeG", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 63, "text": "Here's an example of how you can implement a regular expression to validate an email address in Python:\n\n```import re\n\nEmail Addition = r\"^[a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$\"\n\nemaill = input(\"Enter an email address: \")\n\nif re.match(Email Addition, emaill):\n print(\"Valid email address.\")\nelse:\n print(\"Invalid email address.\")\n```\n\nIn this example, the regular expression `^[a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$ is used to match email addresses. The regex checks for:\n\n* A string that starts with one or more characters that are letters, numbers, periods, underscores, or hyphenates,^[a-zA-Z0-9._-]+@\n* Followed by a @ symbol\n* Followed by a string that contains one or more characters that are letters, numbers, periods, underscores, or hyphenates, but no spaces: [a-zA-Z0-9._-]+\\.\n* Followed by a period symbol\n* Followed by a string that contains only letters, uppercase.\n\nYou can also use `re.match()` or `re.search()` to match the email address instead of `re.match()` which will match the whole email address in the input string while `re.search()` will match the pattern only in the input string.", "answer_id": "Rz8sSwJzffBjUhjEYAyEWS", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 64, "text": "Here's an example program in Python that finds the nth Fibonacci number using dynamic programming:\n\n```python\ndef fibonacci(n, memo={0: 0, 1: 1}):\n def fibonacci(n, memo={0: 0, 1: 1}, depth=1):\n if n < 2:\n return memo[n]\n else:\n return fibonacci(n-1, memo, depth=depth+1) + fibonacci(n-2, memo, depth=depth+1)\n return None\n return fibonacci(n, memo)\n\n#Example Usage\nprint(fibonacci(10)) # 55\n```\n\nIn this program, we define a function `fibonacci` that takes an integer `n` and a dictionary `memo` as its arguments. The `memo` dictionary is used to store the previously computed Fibonacci numbers, so that we don't have to calculate them again.\nThe `fibonacci` function has two nested functions: `fibonacci`, which calculates the nth Fibonacci number, and `fibonacci`, which calculates the (n-1)th Fibonacci number and the (n-2)th Fibonacci number.\nThe outer `fibonacci` function first checks if `n` is less than 2. If it is, it returns the value stored in the `memo` dictionary for `n`. If `n` is greater than 2, the outer `fibonacci` function calls the inner `fibonacci` function `depth+1` times, with `n-1` and `n-2` as arguments, and returns the sum of the two Fibonacci numbers returned by the inner `fibonacci` function.\nIf the inner `fibonacci` function encounters a recursion limit, it returns `None`.\nThe `fibonacci` function eventually returns the value of the Fibonacci number stored in the `memo` dictionary for `n`.\nTo use this function, we just need to call `fibonacci(n)` and pass in an appropriate value for `n`. For example, we can call `fibonacci(10)` to get the 10th Fibonacci number 55.", "answer_id": "SQCnPwtYgHH57ikbXgUpQq", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 65, "text": "A binary search algorithm is a search algorithm that works by dividing a sorted array into halves and repeatedly checking the middle element until the correct element is found. Here is a step-by-step implementation of a binary search algorithm to find a specific element in a sorted array:\n1. Start with the middle index of the array.\n2. Divide the array into two halves, with the middle element as the pivot.\n3. Compare the middle element in the left half with the target element.\n4. If they match, return the index of the middle element.\n5. If the middle element is smaller than the target element, look in the right half of the array.\n6. If the middle element is larger than the target element, look in the left half of the array.\n7. Repeat steps 3-6 until the target element is found or the array is empty.\nHere is an example implementation in Python:\n\n```\ndef binary_search(arr, target):\n low = 0\n high = len(arr)-1\n while low <= high:\n mid = (low + high) // 2\n if arr[mid] == target:\n return mid\n elif arr[mid] < target:\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n# Example usage\narr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\ntarget = 5\nprint(binary_search(arr, target)) # prints 3\nprint(arr[3]) # prints 5\n```\n\nIn this example, the array is [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], and we are looking for the element 5. The binary search algorithm divides the array into halves, compares the middle element with the target element, and continues to narrow down the search until the target element is found at index 3.", "answer_id": "mfGSLxAAjjBmUFtCx3STEN", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 66, "text": "A queue can be implemented using two stacks in Python, with one stack for pushing elements onto the queue and the other stack for popping elements off the queue. Here is an example implementation:\n\n```python\nclass Queue:\n def __init__(self):\n self.in_queue = []\n self.out_queue = []\n self.in_stack = []\n self.out_stack = []\n\n def enqueue(self, element):\n self.in_stack.append(element)\n\n def dequeue(self):\n self.out_stack.pop()\n self.in_queue.append(self.out_stack[-1])\n\n def peek(self):\n return self.in_queue[-1]\n\n def is_empty(self):\n return self.in_queue == []\n```\n\nIn this implementation, the `enqueue` method adds the element to the `in_stack`, while the `dequeue` method pops an element from the `out_stack` and adds it to the `in_queue`. The `peek` method returns the head of the `in_queue`, and the `is_empty` method checks if the `in_queue` is empty.\nThis implementation is based on the following assumptions:\n1. The stacks are always full, meaning that they can hold as many elements as needed.\n2. The enqueue operation is always faster than the dequeue operation.\n3. The peek operation is always faster than the dequeue operation.\n4. The time complexity of enqueue, dequeue, peek, and is\\_empty operations are O(1).", "answer_id": "CcUgvJProVma9sNKW2nKHc", "model_id": "vicuna-13b:20230322-new-hp-fp16", "metadata": {}}
+{"question_id": 67, "text": "Here is one way to implement a program to find the common elements in two arrays without using any extra data structures in C++:\n\n1. Accept the two arrays as input from the user.\n2. Initialize two empty arrays to store the common elements.\n3. Loop through the first array and check if each element exists in the second array. If it does, then move that element from the second array to the first array.\n4. Loop through the second array and check if each element exists in the first array. If it does not, then move that element from the first array to the second array.\n5. Print out the elements in the first array as the common elements between the two arrays.\n\nHere is some sample code for this program:\n```\n#include \n#include \n\nusing namespace std;\n\nint main() {\n // Accept the two arrays as input from the user\n int size1, size2;\n cout << \"Enter size of first array: \";\n cin >> size1;\n cout << \"Enter elements of first array: \";\n for(int i=0; i> arr1[i];\n }\n cout << \"Enter size of second array: \";\n cin >> size2;\n cout << \"Enter elements of second array: \";\n for(int i=0; i> arr2[i];\n }\n\n // Initialize two empty arrays to store the common elements\n int common[min(size1, size2)];\n int second[min(size1, size2)];\n memset(common, 0, sizeof(common));\n memset(second, 0, sizeof(second));\n\n // Loop through the first array and check if each element exists in the second array\n for(int i=0; i\n#include \n#include \n\nusing namespace std;\n\nint main()\n{\n // Specify the name of the text file you want to read\n string filename = \"example.txt\";\n\n // Open the file in binary mode for reading\n ifstream file(filename, ios::binary);\n\n // Set the string to search for\n string search_string = \"example\";\n\n // Initialize a counter to keep track of the number of occurrences of the search string\n int counter = 0;\n\n // Start reading the file line by line\n string line;\n while (getline(file, line))\n {\n // Check if the line contains the search string\n if (line.find(search_string)!= string::npos)\n {\n // Increment the counter if the line contains the search string\n counter++;\n }\n }\n\n // Close the file\n file.close();\n\n // Print the number of occurrences of the search string\n cout << \"Number of occurrences of search string: \" << counter << endl;\n\n return 0;\n}\n```\n\nThis program reads the specified text file line by line, and uses a binary `ifstream` object to read the file in binary mode. The program uses a counter to keep track of the number of occurrences of a specified search string in the file. The `getline` function is used to read each line of the file, and the `find` function is used to check if the line contains the search string. If the line contains the search string, the counter is incremented. Finally, the program prints the number of occurrences of the search string.\nOf course, you'll need to specify the name of the text file you want to read in the `filename` variable. You can modify the `search_string` variable to specify the string you want to search for in the file.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "jyzD2deb9JAffRhhkk37r5", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 62, "text": "Here's an implementation of the Longest Common Subsequence (LCS) problem using dynamic programming in Python:\n\n```python\ndef lcs(str1, str2):\n # Initialize dp table with maximum values\n m, n = len(str1), len(str2)\n dp = [[float('inf') for j in range(n+1)] for i in range(m+1)]\n for i in range(m+1):\n dp[i][0] = float('inf')\n for j in range(n+1):\n dp[0][j] = float('inf')\n\n # Fill in dp table\n for i in range(1, m+1):\n for j in range(1, n+1):\n if str1[i-1] == str2[j-1]:\n dp[i][j] = dp[i-1][j-1]\n else:\n dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1] + (str1[i-1] == str2[j-1]))\n\n return dp[m][n]\n\n# Example usage\nstr1 = \"banana\"\nstr2 = \"ananana\"\nprint(lcs(str1, str2)) # prints 5\n```\n\nIn this implementation, we use a 2D dynamic programming table `dp` to store the LCS of substrings of `str1` and `str2`. The maximum value in each cell of `dp` represents the length of the LCS of the corresponding substrings. We initialize the table with maximum values, except for the first row and column, which are set to `float('inf')`.\nWe then fill in the table by iterating over the substrings of `str1` and `str2` in a sliding window fashion, and using the three possible options for each cell: the value of the previous cell in the same row or column, the value of the previous cell in the same row or column of the other string, or the sum of these two values plus one if the current substring and the previous substring match.\nOnce we have filled in the entire table, we return the maximum value in the last cell, which represents the LCS of the entire strings.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "jPGHEfp7FxhDyvRA6qEN3j", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 63, "text": "Here is a regular expression that can be used to validate an email address in Python:\n```\nimport re\n\n# Define the regular expression\nemail_regex = re.compile(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$')\n\n# Test the regular expression\nemail = \"user@example.com\"\nif email_regex.match(email):\n print(\"Email address is valid.\")\nelse:\n print(\"Email address is not valid.\")\n```\nThis regular expression matches the following patterns:\n\n* One or more alphabetical characters (`[a-zA-Z]`), followed by a domain name, which must start and end with an alphanumeric character and may contain any combination of alphanumeric characters and the following special characters: `-` `.` `_` `%` `+` `*` `?` `@`.\n* The `@` symbol, followed by one or more alphanumeric characters, which may include the special characters listed above.\n* The domain name must not start or end with a `:` character.\n* The entire email address must end with a top-level domain of two or more alphanumeric characters.\n\nThis regular expression is not perfect, as it can be easily bypassed by spammers and does not check if the domain name actually exists. However, it can be a useful starting point for validating email addresses in many cases.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "Ra9eTcAuNZPkPRnaHiQrak", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 64, "text": "Here is an example of a program in Python that uses dynamic programming to find the nth Fibonacci number:\n```\ndef fibonacci(n):\n # Initialize memoization table with 0s\n memo = [[0] * (n + 1) for _ in range(n + 1)]\n \n # Base case: n = 0 or n = 1\n if n <= 1:\n return [0, 1]\n \n # Recursive case\n else:\n return fibonacci(n-1) + fibonacci(n-2)\n \n# Test the function with a specific number of terms\nfor i in range(10):\n print(fibonacci(i+1))\n```\nThis program defines a function `fibonacci` that takes an integer `n` as input and returns the `n`th Fibonacci number. The function uses dynamic programming to memoize the values of the previous terms in the sequence, so that it doesn't have to recalculate them every time. The function starts by initializing the memoization table with 0s, which means the first term of the sequence is 0. Then it checks if the input `n` is 0 or 1, and if it is, it returns 0 and 1. If the input `n` is greater than 1, it calls the function recursively twice, with inputs `n-1` and `n-2`, and returns the sum of those two values. This way it calculates the nth Fibonacci number by adding the n-1 and n-2 terms together.\n\nNote: The program above will work fine for small values of n, but for large values of n, it will take a lot of memory and time. A more efficient way to calculate the nth Fibonacci number is using Binet's formula.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "5mbEFLEnEx8Cm2wEF8WAbM", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 65, "text": "Here is an example of a binary search algorithm in Python:\n```\ndef binary_search(arr, target):\n start = 0\n end = len(arr) - 1\n while start <= end:\n mid = (start + end) // 2\n if arr[mid] == target:\n return mid\n elif arr[mid] < target:\n start = mid + 1\n else:\n end = mid - 1\n return -1\n\narr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\ntarget = 4\nprint(binary_search(arr, target)) # output: 4\n```\nThe algorithm starts by initializing the start and end indices of the array to 0 and the length of the array -1, respectively. It then enters a while loop that continues as long as the start index is less than or equal to the end index.\n\nOn each iteration of the loop, it calculates the middle index of the array and compares the value at that index to the target value. If the values are equal, the algorithm returns the middle index. If the value at the middle index is less than the target, the algorithm updates the start index to be the middle index +1, and the end index to be the middle index -1. If the value at the middle index is greater than the target, the algorithm updates the end index to be the middle index -1 and the start index to be the middle index +1.\n\nIf the end index is less than the start index, the algorithm returns -1 to indicate that the target value is not present in the array.\n\nThis algorithm has a time complexity of O(log n) and is most efficient when used with a sorted array.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "KLbBzjbuwq4agAESQ9Wfpq", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 66, "text": "Here is an implementation of a queue using two stacks in Python:\n\n```cpp\nclass Queue:\n def __init__(self):\n self.in_stack = []\n self.out_stack = []\n \n def enqueue(self, item):\n self.in_stack.append(item)\n self.out_stack.clear()\n \n def dequeue(self):\n if not self.in_stack:\n raise EmptyQueueError\n item = self.in_stack.pop()\n self.out_stack.append(item)\n return item\n \n def peek(self):\n if not self.in_stack:\n raise EmptyQueueError\n return self.in_stack[-1]\n \n def is_empty(self):\n return not self.in_stack\n\nclass EmptyQueueError(Exception):\n pass\n```\n\nIn this implementation, the `in_stack` and `out_stack` are used as the underlying data structure to implement the queue. The `enqueue` method adds an item to the end of the `in_stack` and clears the `out_stack`. The `dequeue` method removes an item from the front of the `in_stack` and adds it to the end of the `out_stack`. The `peek` method returns the item at the front of the `in_stack`. The `is_empty` method checks if the `in_stack` is empty. If the `in_stack` is empty, the `is_empty` method raises an `EmptyQueueError`.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "ganAwetd9VTzEAiTtsskzX", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 67, "text": "To find the common elements in two arrays without using any extra data structures, you can use an iterative approach. Here's one possible solution in Python:\n\ndef find\\_common\\_elements(arr1, arr2):\nresult = []\nfor item1 in arr1:\nfor item2 in arr2:\nif item1 == item2:\nresult.append(item1)\nreturn result\n\nHere's an example of how you could use this function:\n\narr1 = [1, 2, 3, 4, 5]\narr2 = [3, 4, 5, 6, 7]\n\nprint(find\\_common\\_elements(arr1, arr2))\n\nThis should output `[3, 4, 5]`. The function iterates over each item in both arrays, and if it finds a match, it adds the item to the `result` list. If there are no matches, it returns an empty list.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "LG6szQ3cqNRzUjXtZtXG94", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 68, "text": "To find the value of f(2), we need to substitute 2 for x in the function f(x).\n\nf(x) = 5x^3 - 2x + 3\n\nWhen x = 2, we have:\n\nf(2) = 5(2)^3 - 2(2) + 3 = 5 \\* 8 - 2 + 3 = 41 - 2 + 3 = 40\n\nSo the value of f(2) is 40.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "LHMnXGbSZubeZJcr5fgrte", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 69, "text": "We can solve this equation using the following steps:\n\n1. Simplify the left-hand side (LHS) of the equation by moving all the terms to one side and the numbers to the other: 3x + 10 = 5x - 10\n2. Combine the terms on the left-hand side by subtracting 5x from both sides: 3x = -5x\n3. Simplify the left-hand side by dividing both sides by 3: x = -5/3\n4. Take the natural logarithm of both sides to get the solution in terms of the natural logarithm base e: ln|x| = ln|-5/3| = -5/3 ln|e| = -5/3 ln(1.414) = -5/3 ln(1.414) = -5/3 ln(1 + 1/1.414) = -5/3 ln(1.414)\n5. Exponentiate both sides to get the solution in terms of e: x = e^(-5/3)\n6. Rearrange the solution to get x in terms of Euler's number: x = e^(-5/3) = 1/1.414^(-5/3) = 0.5877\n7. Round the solution to the desired number of decimal places: x = 0.5877\n\nTherefore, x = 0.5877 is the solution to the equation 3x + 10 = 5(x - 2).", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "TGENVWL5iAYi4NCf5jAzKy", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 70, "text": "The length of the line segment is the distance between the two points. To find the length, you can use the Pythagorean theorem, which states that the sum of the squares of the distances from a point to the two endpoints is equal to the square of the distance between the two endpoints.\n\nIn this case, the distance from the point (2, -2) to the point (10, 4) is given by the formula:\n\nsqrt((x2 - x1)^2 + (y2 - y1)^2)\n= sqrt((10 - 2)^2 + (4 - (-2))^2)\n= sqrt(18^2 + 4^2)\n= sqrt(324 + 16)\n= sqrt(340)\n= 17.973\n\nSo the length of the line segment is 17.973.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "kc9oytarvqyNVVfGZz93EL", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 71, "text": "Certainly, here's an example of a formal email proposing a joint venture:\n\nSubject: Proposal for Joint Venture\n\nDear [Name of Partner],\n\nI hope this email finds you well. I am writing to propose a joint venture between our companies, [Your Company Name] and [Partner Company Name]. We believe that by working together, we can achieve great success and mutually benefit from the partnership.\n\nOur company specializes in [Your Company's Expertise], while your company is a leader in [Partner Company's Expertise]. We believe that by combining our strengths and capabilities, we can create a powerful partnership that can bring innovative solutions to the market.\n\nWe propose that we establish a new company that will be jointly owned by our two companies. This new company will have its own management team, and will operate independently from our respective parent companies. We will work together to identify new business opportunities, and share the profits and losses of the new company equally.\n\nWe believe that this partnership will be highly beneficial for both of our companies. It will give us access to new markets, new customers, and new technologies. It will also allow us to share the risks and costs of new ventures, and to leverage our resources and expertise to achieve greater success.\n\nWe would like to schedule a meeting to discuss this proposal in more detail, and to explore how we can work together to achieve our mutual goals. We believe that this partnership has the potential to be a great success, and we are excited about the opportunities that it can bring.\n\nPlease let us know if you are available to meet in the next [X] days, and we will arrange a convenient time and place.\n\nThank you for considering our proposal. We look forward to hearing from you soon.\n\nBest regards,\n\n[Your Name]\n[Your Title]\n[Your Company Name]", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "NxstpvVEnGMny3jECtvrw2", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 72, "text": "Yes, I can help you write a resignation letter. Here is a sample letter you can use as a guide:\n\nDear [Employer],\n\nI am writing to inform you of my decision to resign from my position as [Position] at [Company]. I have truly appreciated the opportunities and experiences that I have had during my time here, and I am grateful for the support and guidance you and the team have provided me.\n\nI have accepted a new position at a different company, and I will be leaving on [date]. I am confident that the skills and experience I have gained during my time at [Company] will serve me well in my future endeavors.\n\nI want to assure you that I will do everything I can to ensure a smooth transition and to complete any outstanding tasks before my departure. I would be happy to discuss any specific needs or concerns you may have regarding this transition.\n\nAgain, I want to express my sincere appreciation for the opportunities and support provided to me during my time at [Company]. I am grateful for the friendships and connections I have made here, and I will always look back on my time here with fond memories.\n\nThank you for the opportunity to work with such a great team, and I wish you and the company continued success in the future.\n\nSincerely,\n\n[Your name]", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "XN9NBpeQJiQqGT5UYkys7N", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 73, "text": "[Your Name]\n[Your Title]\n[Your Affiliation]\n[Date]\n[Recipient\u2019s Name]\n[Recipient\u2019s Title]\n[Recipient\u2019s Affiliation]\n[Address]\nDear [Recipient\u2019s Name],\nI am writing to wholeheartedly recommend [Student\u2019s Name] for admission to the prestigious graduate program in computer science at [University Name]. As [Student\u2019s Name]\u2019s professor and academic advisor, I have had the pleasure of observing their academic prowess, intellectual curiosity, and exceptional character.\nIn my [Number] years of teaching, I have never encountered a student as dedicated and passionate about computer science as [Student\u2019s Name]. They consistently demonstrate exceptional aptitude in programming, algorithms, and data structures, and have shown a keen interest in artificial intelligence and machine learning. [Student\u2019s Name] has also shown great initiative in exploring new areas of research and has made significant contributions to the field through their work on [specific projects or research].\nIn addition to their academic excellence, [Student\u2019s Name] is an individual of exceptional character and integrity. They have consistently demonstrated leadership, teamwork, and collaboration skills, working effectively with their peers and mentors to achieve common goals. Their positive attitude and willingness to help others has made them a valuable member of the academic community.\nI am confident that [Student\u2019s Name] will make a valuable contribution to the graduate program in computer science at [University Name]. Their academic achievements, passion for computer science, and exceptional character make them an ideal candidate for this prestigious program.\nThank you for your consideration of my recommendation. I am confident that [Student\u2019s Name] will excel in their studies and make a significant contribution to the field of computer science.\nSincerely,\n[Your Signature]\n[Your Name]\n[Your Title]\n[Your Affiliation]", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "3qMqAZcfHUVUTEJnMXT8Pw", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 74, "text": "Subject: Introducing our latest innovation: [Software Name]\n\nDear valued customers,\n\nWe are proud to announce the launch of our new software solution, designed to revolutionize [industry/field] with its cutting-edge technology and user-friendly interface.\n\n[Software Name] is the result of years of research and development, and we believe it will bring significant improvements to [specific benefits or advantages]. With its advanced features and customizable options, it is the perfect solution for [target audience or market].\n\nHere are just a few of the key features of [Software Name]:\n\n* [Feature 1]\n* [Feature 2]\n* [Feature 3]\n* [Feature 4]\n* [Feature 5]\n\nWe invite you to experience the benefits of [Software Name] for yourself. Our team is available to answer any questions you may have and to help you get started.\n\nTo learn more and to schedule a demo, please visit our website or contact us directly.\n\nThank you for your continued support and for choosing [company name]. We look forward to serving you with our latest innovation.\n\nBest regards,\n[Your name]\n[Company name]", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "hMXjd228Mc8pnc7zgd2frG", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 75, "text": "Subject: Apology and Update on Order Delay\n\nDear [Customer Name],\n\nI am writing to apologize for the delay in the delivery of your order. I understand that this has caused inconvenience and frustration, and I want to assure you that we take this matter very seriously.\n\nWe have thoroughly investigated the issue and have identified the cause of the delay. We want to assure you that the issue has been resolved, and we are taking steps to prevent similar delays in the future.\n\nWe understand the importance of your order and the trust that you have placed in us. We want to assure you that we are committed to providing you with the highest level of service and quality products.\n\nAgain, please accept our sincere apologies for the delay and any inconvenience it may have caused. If you have any further concerns or questions, please do not hesitate to contact us.\n\nSincerely,\n\n[Your Name]", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "gUEd9nNepqy8nsfbypscHx", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 76, "text": "Hello and welcome to our YouTube video on the history and cultural significance of jazz.\nJazz is a genre of music that originated in New Orleans in the late 19th and early 20th centuries. It is characterized by its improvisational nature, blues and swing rhythms, and a mixture of different musical traditions, including African rhythms, European classical music, and folk music.\nThe birth of jazz is often attributed to the legendary cornet player, Buddy Bolden, who is said to have played a key role in shaping the sound and style of early jazz. However, it was Louis Armstrong, who is considered the \"Father of Jazz,\" who popularized the genre and brought it to the world.\nJazz has had a profound influence on American culture and has been an important part of the country's musical heritage. It has also played a significant role in shaping the world's music and has been a major influence on many other genres, including rock and roll, hip hop, and R&B.\nJazz has also been a crucial part of the civil rights movement in the United States. The music has often been used as a form of protest and has been associated with the fight for equality and justice.\nThroughout its history, jazz has been shaped and influenced by a wide range of musicians and cultural movements. Today, it continues to evolve and remains an important and influential genre of music, both in the United States and around the world.\nIn conclusion, jazz is a genre of music that has had a rich history and continues to be an important part of American and world culture. Its improvisational nature, blues and swing rhythms, and mixture of different musical traditions have made it a unique and beloved genre of music, and one that will continue to be enjoyed for generations to come.\nThanks for watching, and we hope you learned something new about the history and cultural significance of jazz.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "nZZyn3SAvU8T2BVR73LMHC", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 77, "text": "Aloha, fellow travelers! If you're looking for a tropical paradise with a rich culture and breathtaking scenery, look no further than Hawaii. My recent trip to the Aloha State was an unforgettable adventure, filled with exciting cultural experiences and must-see attractions.\nFirst and foremost, I was blown away by the warm and welcoming spirit of the Hawaiian people. From the moment I arrived, I felt at home and was greeted with open arms by the locals. Whether it was the hotel staff, my tour guide, or the friendly strangers I met on the street, everyone I encountered was eager to share their love for their home state.\nOne of the most memorable cultural experiences I had was attending a traditional Hawaiian luau. At this festive event, I was treated to a delicious feast of local cuisine, including poi, laulau, and kalua pig, all while listening to the hypnotic beat of the hula. The dancers, dressed in beautiful native garb, told stories of the islands and their history through their movements. It was truly a magical evening that allowed me to connect with the rich heritage of Hawaii.\nAnother must-see attraction that left me in awe was the breathtaking beauty of Haleakala National Park. The park boasts a dormant volcano, the largest on the island, that offers a stunning sunrise or sunset hike. As the sun rose over the horizon, I was struck by the vibrant colors and stark beauty of the landscape. It was an experience that truly humbled me, and one that I will never forget.\nOf course, no trip to Hawaii would be complete without spending some time at the beach. With its crystal-clear waters, golden sand, and gentle waves, Wailea Beach was the perfect spot to relax and soak up the sun. Whether you're an avid surfer, a beach bum, or simply looking to unwind, Hawaii's beaches are sure to steal your heart.\nIn conclusion, Hawaii was a true paradise that exceeded all of my expectations. From the moment I stepped off the plane, I was immersed in a culture that was rich, vibrant, and full of life. With so many must-see attractions and unforgettable experiences, Hawaii is the perfect destination for any traveler looking for a little bit of adventure, culture, and relaxation. Mahalo for reading, and I hope to see you in Hawaii soon!", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "YM4qjUM5VfmWnzM3HXRB4G", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 78, "text": "\"Beyond the Horizon\" is a thrilling science fiction film that takes you on a journey through time and space. The movie follows the story of a group of astronauts who are sent on a mission to a distant planet to search for signs of alien life.\n\nThe plot is fast-paced and keeps you on the edge of your seat from start to finish. The special effects are out of this world, with stunning visuals that transport you to a world beyond our own. The characters are well-developed and you easily become invested in their fate.\n\nThe cast is led by a charismatic actor who brings a sense of determination and courage to his role as the leader of the mission. The supporting cast is equally impressive, with each actor bringing their own unique personality to their character.\n\nOverall, \"Beyond the Horizon\" is a must-see movie for science fiction fans and anyone looking for a captivating and thrilling film. With its engaging plot, well-developed characters, and stunning special effects, this movie is sure to leave you with a sense of wonder and awe. So, grab some popcorn and get ready to blast off on an interstellar adventure!", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "WcbzNkzu5q66QkK92NxmGB", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 79, "text": "Opening:\n\"Hey there, welcome to this episode of Sound Matters, where we explore the many facets of the music industry. Today, we're going to be talking about the influence of streaming platforms on the music industry. Whether you're a fan, an artist, or a industry insider, it's hard to deny the massive impact that streaming platforms like Spotify, Apple Music, and Tidal have had on the way we consume and discover music. So, let's dive in and talk about how streaming platforms have changed the game.\"\n\nSegment 1: The Pros of Streaming Platforms\n\n* Increased access to music\n* Convenience and ease of use\n* Greater exposure for artists\n* Revenue streams for artists\n\nSegment 2: The Cons of Streaming Platforms\n\n* Decreased album sales and revenue for artists\n* Artists not being paid fairly\n* Piracy and copyright issues\n* Mental health and addiction concerns\n\nSegment 3: The Future of Streaming Platforms\n\n* Where the industry is headed\n* Potential changes and improvements\n* The role of streaming platforms in the future\n\nClosing:\n\"That's all for this episode of Sound Matters. We hope you enjoyed the discussion on the influence of streaming platforms on the music industry. As always, thank you for tuning in. Until next time, keep listening and keep loving the music.\"", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
+{"answer_id": "Tfm4f2mbpKJsa8MT6exs8D", "model_id": "vicuna-13b:20230322-clean-lang", "question_id": 80, "text": "The concert was a true spectacle of sound, featuring a talented orchestra that performed with precision and passion. The conductor lead the ensemble with skill and finesse, bringing out the full range of emotion in each piece.\n\nFrom the opening notes of the first piece, it was clear that the orchestra was in top form. The strings were rich and full-bodied, the woodwinds were crisp and clear, and the brass was powerful and commanding. The overall sound was dynamic and engaging, keeping the audience on the edge of their seats throughout the performance.\n\nOne of the standout pieces of the evening was the second movement of Dvorak's Symphony No. 9, which was performed with a deep sense of longing and yearning. The orchestra's playing was heartfelt and moving, evoking a sense of nostalgia and wistfulness in the audience.\n\nThe final piece of the concert was a rousing performance of Tchaikovsky's 1812 Overture, complete with cannon fire and bells. The orchestra played with gusto and energy, bringing the audience to their feet in a standing ovation.\n\nOverall, the concert was a fantastic experience for both the orchestra and the audience. The orchestra played with passion and precision, and the audience was transported by the music. The performance was a true celebration of the power of music, and it left a lasting impression on all who were fortunate enough to be in attendance.", "metadata": {"huggingface_argument": {"do_sample": true, "temperature": 0.7, "max_new_tokens": 1024}}}
diff --git a/model/fastchat/eval/table/answer/answer_vicuna-7b-20230322-fp16.jsonl b/model/fastchat/eval/table/answer/answer_vicuna-7b-20230322-fp16.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b3d06d3e70bc89e0810592061759dea95f62a681
--- /dev/null
+++ b/model/fastchat/eval/table/answer/answer_vicuna-7b-20230322-fp16.jsonl
@@ -0,0 +1,80 @@
+{"question_id": 1, "text": ": Time management is the process of planning and organizing your tasks and priorities to make the best use of your time. Here are some tips to help you improve your time management skills:\n1. Set goals: Identify what you want to achieve and set specific, measurable, achievable, relevant, and time-bound (SMART) goals. This will help you focus on what's important and prioritize your tasks.\n2. Create a schedule: Plan out your tasks and schedule in advance, and prioritize the most important and urgent tasks. Use a planner or calendar to keep track of your schedule and deadlines.\n3. Delegate and outsource: Delegate tasks that can be done by others and outsource tasks that require specialized skills or resources. This will free up your time to focus on more important tasks.\n4. Eliminate distractions: Identify and eliminate distractions, such as social media, email, and phone calls, which can waste your time.\n5. Use time blocks: Set aside specific blocks of time for different tasks, and stick to them. This will help you stay focused and avoid multitasking, which can reduce the quality of your work and increase stress.\n6. Take breaks: Take regular breaks throughout the day to recharge and refresh your mind.\n7. Review and adjust: Regularly review your schedule and adjust it as needed to ensure that you are making the best use of your time.\n8. Practice mindfulness: Mindfulness can help you stay focused and present, and reduce stress and anxiety.\n9. Get support: Seek support from friends, family, or a coach or therapist if you need help staying on track or managing stress.", "answer_id": "8haRLA69xGcHc5woRHTj2f", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 2, "text": ": Stress is a natural and normal part of life, and everyone experiences it at some point. However, too much stress can have negative effects on your physical and mental health. Here are some effective ways to manage stress:\n1. Exercise: Regular physical activity can reduce stress levels, boost your mood, and improve your overall health.\n2. Relaxation techniques: Techniques such as meditation, deep breathing, and yoga can help you relax and reduce stress.\n3. Time management: Developing good time-management skills can help you feel more in control and reduce stress.\n4. Social support: Having a strong support system of friends and family can help you cope with stress.\n5. Healthy diet: Eating a balanced diet with plenty of fruits, vegetables, and whole grains can improve your mood and reduce stress.\n6. Sleep: Getting enough sleep and establishing a consistent sleep routine can help reduce stress.\n7. Avoid substance abuse: Substance abuse can increase stress levels and negatively impact your health.\n8. Take breaks: Make time for leisure activities and hobbies to help you unwind and reduce stress.\nRemember that everyone's stressors and stress responses are different, and what works for one person may not work for another. It's important to find what works best for you and make it a regular part of your life.", "answer_id": "j7DV4tZTXhmRFj9zL5TSwq", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 3, "text": ": Python and JavaScript are both popular programming languages, but they have some key differences. Here are some of the main differences between Python and JavaScript:\n1. Syntax: Python has a simpler syntax than JavaScript. Python uses indentation to determine code blocks, while JavaScript uses curly braces.\n2. Strongly Typed: Python is a strongly typed language, which means that variables must be declared with a specific data type, such as integers or strings. JavaScript is a weakly typed language, which means that variables can be of any data type.\n3. Object-oriented: Python is an object-oriented language, which means that it is based on objects and classes. JavaScript is a prototype-based language, which means that it is based on objects and the functions that can be created from them.\n4. Libraries: Python has a large number of libraries available for a wide range of tasks, from web development to scientific computing. JavaScript has a smaller number of libraries, but they are widely used for web development.\n5. Web Development: Python is a popular choice for web development due to its simplicity and readability, while JavaScript is the primary language for client-side web development.\n6. Error Handling: Python has built-in support for error handling, while JavaScript has a more informal approach to error handling.\n7. Functionality: Python has a broader range of functionality, including scientific computing, data analysis, and machine learning, while JavaScript is primarily used for web development, often with the use of frameworks such as React, Angular, and Vue.", "answer_id": "fEq2fYvNcpaENuQFRMFdan", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 4, "text": ": Working from home can be a great opportunity to increase your productivity, but it can also be challenging. Here are some tips to help you increase your productivity while working from home:\n1. Set a schedule: Establish a routine and stick to it. Set specific work hours and take breaks to avoid burnout.\n2. Create a designated workspace: Create a dedicated workspace in your home, free from distractions.\n3. Use noise-cancelling headphones: If you work from a noisy environment, use headphones to block out distractions.\n4. Minimize multitasking: Focus on one task at a time and avoid switching between tasks.\n5. Take breaks: Take regular breaks to refresh your mind and prevent burnout.\n6. Use time-management tools: Use tools like a to-do list or a calendar to help you stay organized and focused.\n7. Stay connected with colleagues and clients: Keep in touch with your colleagues and clients regularly to stay informed about projects and deadlines.\n8. Stay motivated: Keep yourself motivated by setting achievable goals and rewarding yourself when you reach them.\n9. Stay physically active: Take regular breaks to stretch and move around to maintain good posture and reduce fatigue.\n10. Set boundaries: Set boundaries with your family or roommates to avoid distractions and interruptions during work hours.\nBy following these tips, you can increase your productivity while working from home and achieve your goals more effectively.", "answer_id": "VYKmdXnESAKV3FRutDUPWU", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 5, "text": ": Quantum computing is a type of computing that uses the principles of quantum mechanics to perform computations and solve problems that are beyond the capabilities of classical computers.\nIn classical computing, information is represented in bits, which can have a value of either 0 or 1. In contrast, quantum computing uses quantum bits, or qubits, which can exist in multiple states at the same time. This allows quantum computers to perform certain types of calculations much faster than classical computers.\nOne of the key principles of quantum mechanics is superposition, which is the ability of a quantum particle to exist in multiple states at the same time. This allows a quantum computer to perform many calculations simultaneously, which can lead to exponential speedup in certain types of computations.\nAnother important principle is entanglement, which is the phenomenon where two or more quantum particles become linked and can affect each other even when they are separated by large distances. This can be used to perform certain types of computations much faster than classical computers.\nQuantum computing is still in its early stages of development, and there are many challenges that need to be overcome before it can become a practical technology. However, it has the potential to revolutionize many fields, including cryptography, chemistry, and materials science, by solving problems that are currently beyond the reach of classical computers.", "answer_id": "maL9a3rivWyWZk3UgwQTVR", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 6, "text": ": Plant-based protein sources come from plants, such as beans, legumes, nuts, seeds, and whole grains. These sources of protein are often lower in calories and fat than animal-based sources, and they also tend to be higher in fiber and other nutrients. Some examples of plant-based protein sources include tofu, tempeh, lentils, chickpeas, and quinoa.\nOn the other hand, animal-based protein sources come from animals, such as chicken, beef, fish, and dairy products. These sources of protein are often higher in calories and fat than plant-based sources, and they also tend to be lower in fiber and other nutrients. Some examples of animal-based protein sources include beef, pork, chicken, and fish.\nOne of the main differences between plant-based and animal-based protein sources is the nutrient composition. Plant-based sources of protein tend to be lower in fat and higher in fiber than animal-based sources. Additionally, plant-based sources of protein tend to be rich in vitamins, minerals, and antioxidants that are missing in animal-based sources. Another difference is that animal-based sources of protein tend to be higher in essential amino acids, which are the building blocks of protein, than plant-based sources. This means that animal-based protein sources are more easily digested and utilized by the body.\nAnother difference is that plant-based sources of protein are more sustainable and environmentally friendly than animal-based sources, as they require less land, water, and other resources to produce. Additionally, plant-based sources of protein are often lower in environmental impact, as they do not contribute to deforestation, land degradation, and water pollution, as animal agriculture can.", "answer_id": "aGRf8RjpUgneLvw4Uf93do", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 7, "text": ": Developing critical thinking skills can be a lifelong process that requires practice, self-reflection, and a willingness to challenge your own beliefs and assumptions. Here are some strategies that can help you improve your critical thinking skills:\n1. Practice active listening: Pay attention to what people are saying and try to understand their perspective. Ask questions and seek clarification when necessary.\n2. Ask questions: Challenge yourself to ask questions about the information you are presented with. Seek out alternative viewpoints and evaluate the evidence for yourself.\n3. Analyze assumptions: Identify the underlying assumptions in arguments and critically evaluate them. Ask yourself if they are valid and if there are any hidden biases or motives.\n4. Evaluate evidence: Look for counter-evidence and evaluate the quality of the evidence presented. Consider the source and the bias of the person presenting the information.\n5. Think critically: Practice evaluating information by looking at all the facts and considering different perspectives. Avoid making assumptions or jumping to conclusions without all the information.\n6. Seek out diverse perspectives: Try to expose yourself to different viewpoints and experiences. This can help broaden your perspective and increase your ability to see things from different angles.\n7. Reflect on your thinking: Take time to reflect on your own thinking and biases. Seek out feedback from others and be open to constructive criticism.\n8. Practice mindfulness: Being mindful can help you stay present and focused, which can improve your critical thinking skills.\nBy practicing these strategies, you can develop your critical thinking skills and become a more informed and discerning individual.", "answer_id": "oXtzronC4mdVKH9J59ofij", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 8, "text": ": The education sector faces a number of challenges today, including:\n1. Access and equity: Many students do not have access to quality education, particularly those from low-income families or those living in remote or disadvantaged areas.\n2. Teacher shortages and quality: There are teacher shortages in many regions, and the quality of teaching can vary widely, which can hinder student learning.\n3. Technology and innovation: The education sector is struggling to keep up with the rapidly evolving technology landscape, and there is a need to incorporate new technologies and innovations to improve teaching and learning.\n4. Curriculum and standards: The curriculum and standards can vary widely among different educational systems, and there is a need to ensure that they are relevant, rigorous, and internationally comparable.\n5. Financing and funding: Education is often underfunded, particularly in developing countries, which can lead to a lack of resources and infrastructure.\n6. Student engagement and motivation: Many students lack motivation and engagement in their studies, which can hinder their academic progress and future prospects.\n7. Teacher and student evaluation: Evaluation of teachers and students can be difficult and can lead to issues with accuracy and fairness.\n8. International comparisons and rankings: Education systems are often compared and ranked internationally, which can create pressure to perform and may not always be accurate or relevant.", "answer_id": "dE5c99j9hW9qDvjjPxUPzc", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 9, "text": ": Consumer behavior is influenced by a wide range of factors, including:\n1. Personal factors: These include individual characteristics such as personality, values, attitudes, interests, and lifestyle.\n2. Psychological factors: These include cognitive processes such as perception, attention, memory, and decision-making.\n3. Social factors: These include the influence of family, friends, peers, and society, as well as cultural values, norms, and traditions.\n4. Economic factors: These include income, spending power, and the cost of goods and services.\n5. Technological factors: These include the availability and accessibility of technology, as well as the impact of new technologies on consumer behavior.\n6. Environmental factors: These include concerns about the environment and the impact of consumer behavior on the natural world.\n7. Marketing factors: The marketing efforts of companies and organizations can also influence consumer behavior, through advertising, promotions, and other marketing activities.\n8. Health factors: The influence of health and wellness concerns on consumer behavior, such as diet and exercise, and the impact of health problems on consumer behavior.\n9. Legal factors: Legal requirements and regulations can also influence consumer behavior, such as laws related to product safety, labeling, and advertising.\n10. Public opinion: The influence of public opinion and media coverage on consumer behavior, including trends and fashions, and the impact of social media and other forms of online communication.", "answer_id": "oLRzkYUv8ooSJJLqfPnrxd", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 10, "text": ": Effective conflict resolution strategies in the workplace can help to reduce negative feelings, improve communication, and maintain positive relationships among employees. Here are some of the most effective strategies for conflict resolution in the workplace:\n1. Active listening: This involves truly listening to the other person's perspective, without interrupting or judging them.\n2. Identify the issue: Clearly define the problem or issue at hand, and make sure both parties understand the same thing.\n3. Focus on the facts: Avoid getting defensive or making personal attacks, and instead focus on the facts of the situation.\n4. Brainstorm solutions: Work together to come up with a list of potential solutions.\n5. Reach a mutually beneficial agreement: Both parties should feel like they have won, and the agreement should be one that both can live with.\n6. Follow up: Once an agreement has been reached, make sure both parties follow through and hold one another accountable.\n7. Address power imbalances: If there are power imbalances in the workplace, consider involving a neutral third party, such as a supervisor or HR representative, to facilitate the resolution process.\n8. Foster a positive workplace culture: Create a work environment where conflict is not only tolerated but also encouraged, where employees feel valued and respected.\n9. Encourage open communication: Encourage employees to speak up and share their thoughts and concerns, and create a safe space for them to do so.\n10. Provide training: Provide conflict resolution training to employees to help them learn how to effectively deal with conflicts in the workplace.", "answer_id": "hi7Gu2XPwcThie58TvvkK8", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 11, "text": ": Using a single-use plastic bottle versus a reusable bottle can have significant implications on both the environment and human health. Here are some potential implications:\n1. Environmental impact: Single-use plastic bottles are a major source of plastic pollution, which can harm wildlife and damage ecosystems. Plastic debris can also enter the food chain, where it can cause harm to animals and humans who consume contaminated seafood or other products. In contrast, using a reusable bottle can significantly reduce the amount of plastic waste that ends up in landfills and the environment.\n2. Human health: Using a single-use plastic bottle can also pose health risks to humans, as plastic can contain harmful chemicals that can leach into the contents of the bottle. These chemicals can cause cancer, birth defects, and other health problems. In contrast, reusable bottles are typically made with safer, BPA-free materials that are not harmful to human health.\n3. Economic impact: Using a single-use plastic bottle can also have economic implications. Over time, the cost of producing and disposing of single-use plastic can add up to significant costs for individuals and businesses, which can be passed on to consumers. In contrast, using a reusable bottle can save money over time, as the initial investment in a reusable bottle can lead to cost savings in the long run.\n4. Social impact: Using a single-use plastic bottle can also have social implications, as it can contribute to the unsustainable use of natural resources and the waste generated by plastic pollution. In contrast, using a reusable bottle can promote a more sustainable and responsible approach to consumption and reduce the negative impact of plastic waste on society.", "answer_id": "Xx5PB6u9sBagzxtB2YUKq8", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 12, "text": ": When designing an inclusive and accessible public transportation system, several factors should be considered, including:\n1. Physical accessibility: This includes ensuring that the infrastructure, vehicles, and stations are accessible to people with disabilities, including those who use wheelchairs, mobility scooters, or other assistive devices.\n2. Communication accessibility: This includes ensuring that information about the transportation system is available in accessible formats, such as Braille, audio, or large print, for people with visual or hearing impairments.\n3. Route and schedule accessibility: This includes ensuring that routes and schedules are designed to meet the needs of all passengers, including those with disabilities, and that they are easy to understand and use.\n4. Service animals and assistive devices: This includes ensuring that passengers are allowed to bring service animals and assistive devices on the transportation system, and that they are not barred from doing so for arbitrary reasons.\n5. Dissemination of information: This includes ensuring that information about the transportation system is available in multiple languages, and that it is widely advertised and communicated to all members of the community, including those who may not have access to traditional means of communication.\n6. Training and education: This includes ensuring that all employees of the transportation system are trained to provide accessible and inclusive service, and that they are aware of the needs of passengers with disabilities.\n7. Continuous Improvement: This includes regularly reviewing and assessing the accessibility and inclusiveness of the transportation system, and making improvements as needed to ensure that it meets the needs of all passengers.", "answer_id": "FfaUTMS95MuGQQRDefvVzj", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 13, "text": ": Governments can utilize both fiscal and monetary policies to combat economic recessions. Here are some ways that governments can use these policies:\n1. Fiscal Policy: Governments can use fiscal policy to stimulate economic activity during a recession. This can be done by increasing government spending on public works projects, providing tax cuts, or increasing government benefits to help individuals and families who are struggling financially.\n2. Monetary Policy: Governments can also use monetary policy to stimulate economic activity. This can be done by lowering interest rates to encourage borrowing and spending, or by printing more money to increase the money supply and stimulate economic growth.\n3. Fiscal and Monetary Coordination: Governments can also use a combination of fiscal and monetary policies to combat recessions. For example, they can use fiscal policy to stimulate spending and monetary policy to stimulate borrowing and lending.\n4. Fiscal Consolidation: After a recession, governments may need to implement fiscal consolidation measures to reduce the budget deficit and prevent financial instability in the future. This can include cutting government spending and increasing taxes.\n5. Expanding Employment: Governments can use fiscal policy to create employment opportunities, such as by investing in public works projects or providing tax incentives for businesses to hire more workers.\n6. Targeted Support: Governments can use targeted support to help specific groups who are particularly vulnerable during a recession, such as low-income families, small businesses, or specific industries.\n7. International Cooperation: Governments can also work together to combat recessions through international cooperation, such as by providing aid or trade agreements to stimulate economic growth.", "answer_id": "WgCpMqMPUb9TU8jCuiExg3", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 14, "text": ": Language and cultural barriers can affect the way people communicate and form relationships in multicultural societies in a number of ways. Here are a few examples:\n1. Language: Language is a fundamental means of communication, and when people speak different languages, they can have difficulty understanding each other, which can lead to miscommunication and misunderstandings. Language barriers can make it difficult to build trust and rapport, and can make it difficult to form meaningful relationships.\n2. Culture: Culture is a set of shared beliefs, values, practices, and customs that shape the way people interact with each other. When people come from different cultures, they may have different norms and expectations for communication and relationships, which can lead to misunderstandings and conflicts.\n3. Stereotypes: Stereotypes can also create barriers to communication and relationships in multicultural societies. When people hold stereotypes about others based on their race, ethnicity, or culture, they may make assumptions about how others think and behave, which can lead to misunderstandings and misinterpretations.\n4. Prejudice and discrimination: Prejudice and discrimination can also create barriers to communication and relationships in multicultural societies. When people experience discrimination or prejudice, they may feel marginalized and excluded, which can lead to feelings of isolation and disconnection.\n5. Power dynamics: In multicultural societies, power dynamics can also create barriers to communication and relationships. When one group holds more power than another, they may dominate communication and decision-making, which can lead to feelings of resentment and alienation among other groups.\nOverall, language and cultural barriers can make it difficult for people to communicate and form relationships in multicultural societies, and it is important to work to overcome these barriers through education, awareness, and active effort to bridge cultural differences.", "answer_id": "ATkPcXKbAki2VCoopjq6c3", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 15, "text": ": Artificial intelligence (AI) can be used to improve the quality and efficiency of healthcare delivery in many ways. One scenario could be in the area of disease diagnosis and treatment planning.\nCurrently, healthcare professionals spend a significant amount of time diagnosing illnesses and developing treatment plans. However, this process can be time-consuming and may result in delays in treatment. AI can help to speed up this process by analyzing large amounts of patient data, including medical history, symptoms, and test results, to provide accurate diagnoses and personalized treatment plans.\nFor example, an AI system could analyze a patient's medical history, symptoms, and genetic data to suggest the most effective treatment options for a particular disease. The AI system could also analyze large amounts of medical research data to recommend the best course of action for a particular patient. This could help healthcare professionals to make more informed decisions and provide more accurate and timely diagnoses and treatment plans.\nAnother scenario could be in the area of predictive analytics. AI can be used to analyze large amounts of patient data to predict the likelihood of certain diseases and health conditions. This can help healthcare professionals to identify patients who may be at risk of developing certain diseases and take proactive steps to prevent or manage these conditions.\nAI can also be used to improve the efficiency of healthcare delivery by automating tasks such as scheduling appointments, processing insurance claims, and managing patient records. This can free up time for healthcare professionals to focus on providing care to patients.\nOverall, AI has the potential to improve the quality and efficiency of healthcare delivery by providing more accurate diagnoses, personalized treatment plans, and predictive analytics. However, it is important to ensure that AI systems are designed and implemented in a way that respects patient privacy and security.", "answer_id": "TFh5bXFdG4fdK5hmq6qS6o", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 16, "text": ": Gene editing using CRISPR-Cas9 technology is a powerful tool that allows scientists to make precise changes to the DNA of living organisms. The CRISPR-Cas9 system consists of two components: a guide RNA and the Cas9 enzyme. The guide RNA is designed to bind to a specific DNA sequence, while the Cas9 enzyme cuts the DNA at the site where the guide RNA binds. This creates a double-stranded break in the DNA, which can be repaired in various ways, depending on the specific goals of the gene editing process.\nPotential applications of gene editing using CRISPR-Cas9 technology are many and varied. Some of the most promising areas include:\n1. Medical research: CRISPR-Cas9 can be used to study and treat genetic diseases by correcting mutations in the genome.\n2. Agriculture: CRISPR-Cas9 can be used to improve crop yields and make them more resistant to pests and diseases.\n3. Biotechnology: CRISPR-Cas9 can be used to modify the genes of organisms to make them more useful for industrial or commercial applications.\n4. Environmental conservation: CRISPR-Cas9 can be used to edit the genes of invasive species to make them less able to survive in their new environment.\n5. Gene therapy: CRISPR-Cas9 can be used to edit the genes of humans to treat or prevent genetic diseases.\nHowever, the use of CRISPR-Cas9 technology also raises a number of ethical concerns, including:\n1. The possibility of unintended consequences: There is a risk that the CRISPR-Cas9 system could cause unintended mutations or side effects.\n2. The possibility of editing the wrong genes: It is possible to make mistakes when targeting the specific DNA sequence to be edited.\n3. The possibility of editing non-target species: CRISPR-Cas9 could potentially edit the genes of organisms that were not the intended target.\n4. The possibility of creating \"designer babies\": There is a concern that some people might use CRISPR-Cas9 to edit the genes of their children for non-medical reasons.\n5. The possibility of creating an \"arms race\" of genetically modified organisms: Some have suggested that the use of CRISPR-Cas9 could lead to a future where countries or organizations compete to develop the most advanced genetically modified organisms.", "answer_id": "XDV7jFB36qKGzPXPcwvbQy", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 17, "text": ": Vaccines work by introducing a small, harmless piece of a virus or bacteria to the body, which triggers the immune system to produce antibodies to fight it. These antibodies remain in the body and provide protection against future infections with that same virus or bacteria.\nWhen a significant portion of a community is immune to a particular infectious disease through vaccination, it is referred to as herd immunity. This means that the virus or bacteria is unable to spread effectively within the community, as there are not enough susceptible individuals for the virus or bacteria to infect. Herd immunity provides protection not only for the individuals who are immune, but also for those who may not be able to receive vaccines due to age, health status, or other reasons.\nFor example, when a high proportion of children in a community are immune to measles through vaccination, the virus is less likely to spread and infect other children, even those who have not been vaccinated. This helps to prevent outbreaks of the disease and protects vulnerable members of the community, such as newborns and people with compromised immune systems.\nHerd immunity can be achieved through vaccination programs that aim to vaccinate a high proportion of the population, known as herd immunity thresholds. Different diseases have different herd immunity thresholds, and the specific threshold that needs to be reached will depend on the disease and the population being protected.\nIt is important to note that herd immunity is not achieved immediately after vaccination, as it can take several weeks for the immune system to develop sufficient antibodies to provide protection against infection.", "answer_id": "6E3YAfxqckwL83dVo6ZRP4", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 18, "text": ": Social media platforms have had a significant impact on the way people consume and share news, and this has both positive and negative implications. On the positive side, social media platforms have enabled people to access a wide range of news sources and information from around the world, and to connect with others who share similar interests and opinions. This has also made it easier for journalists and news organizations to reach a larger audience and to engage with their readers and viewers.\nHowever, social media platforms have also made it easier for people to spread misinformation and fake news, which can have negative implications for society. The algorithms used by social media platforms can often prioritize sensational or polarizing content, which can attract more attention and engagement. This can lead to the spread of misinformation and fake news, which can have serious consequences, such as the erosion of trust in institutions and the spread of misinformation that can influence political and social outcomes. The spread of misinformation on social media can also lead to the perpetuation of harmful stereotypes and the exacerbation of social and political divisions.\nTo address these issues, social media platforms have implemented various measures to combat misinformation, such as fact-checking tools, the removal of fake accounts and misleading content, and the use of machine learning algorithms to identify and flag misinformation. However, it is an ongoing challenge to ensure that these measures are effective and that the spread of misinformation is minimized.", "answer_id": "FjSXpLx6FfHU8zN9mb8ucX", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 19, "text": ": Cultural, social, and economic factors can have a significant impact on people's food choices. For example, cultural factors such as traditional diets, religious beliefs, and cultural events can influence food preferences and shape food-related behaviors. Social factors such as peer pressure, social norms, and the availability of food options can also influence what people choose to eat. Economic factors such as cost, access to food, and the availability of healthy food options can also play a role in food choices.\nKnowledge of these factors can be used to promote healthier diets by providing information and education on the factors that influence food choices and by making healthy food options more accessible and affordable. For example, promoting cultural events that feature traditional dishes with healthy ingredients, offering healthy food options in schools and workplaces, and providing education on the benefits of healthy eating and the risks of unhealthy diets can all help to promote healthier diets. Additionally, creating incentives for people to make healthier food choices, such as taxes on unhealthy foods, can also be effective in promoting healthier diets.", "answer_id": "HJczP2JdM4s6cxZyBWVyNZ", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 20, "text": ": Natural selection is the process by which traits that are advantageous for an organism in a given environment become more common over time, while traits that are disadvantageous become less common. This process of selection occurs as a result of the interaction between the organism and its environment.\nHere's a brief explanation of how natural selection leads to the evolution and adaptation of species:\n1. Variation: Different individuals in a population have different combinations of traits, and some of these traits may be more advantageous or disadvantageous for the individual in its environment.\n2. Reproduction: Organisms reproduce and pass on their traits to their offspring.\n3. Survival and Reproduction: Organisms with advantageous traits are more likely to survive and reproduce in their environment, passing on their traits to the next generation.\n4. Environmental Pressure: Over time, the environment exerts pressure on the population, selecting for the individuals with the most advantageous traits and against those with disadvantageous traits.\n5. Genetic Drift: Changes in the environment or the movement of individuals can cause genetic variation within a population, leading to changes in the frequency of traits.\n6. Adaptation: As a population adapts to its environment, new traits can evolve, allowing the species to better survive and thrive in its habitat.\nOverall, natural selection is the driving force behind the evolution and adaptation of species to their environments. It is a key concept in the study of biology and helps us understand how organisms have evolved over time to become the diverse and complex creatures that we see today.", "answer_id": "X2EFE34dc5vtFSTGr3n2Bg", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 21, "text": ": As a medieval knight, I would introduce myself to the king and queen at a royal banquet with a deep bow and a respectful greeting, such as \"Your Majesty, it is an honor to be in your presence at this banquet.\" I would then offer my hand to shake, if that were a custom in the court. I would also be mindful of my dress and appearance, making sure that I am well-groomed and properly attired, according to the codes of chivalry and honor of my time.", "answer_id": "iJjKWygtpHaLJirgK2PACK", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 22, "text": ": As a pirate captain, I would say the following to my crew to motivate them to search for hidden treasure:\n\"Comrades, we have been sailing the seas for many months, and we have not found a single treasure. It's time to change that! We will search for the greatest treasure of all time, the treasure that will make us rich beyond our wildest dreams. We will find the hidden treasure, and our fortunes will be changed forever. We will be the most famous pirates in the history of the seven seas. So, hoist the Jolly Roger high, and let's set sail for the next adventure! Ahoy, mateys!\"", "answer_id": "FRZfkefyPRGCSpLEexQoRH", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 23, "text": ": If I were a Shakespearean character, I would declare my love for someone in a soliloquy with words as follows:\n\"Oh, fair maiden, with thy beauty so bright and so refined,\nThou hast captured my heart, and with thy love, I am in a bind.\nIn thy eyes, I see a spark, that burns like a flame,\nAnd in thy embrace, I find the peace I crave.\n\nThou art my sun, my moon, my stars, my sky,\nAnd with thee, my heart finds a new and a higher fly.\nThou art my life, my love, my all, my one,\nAnd with thee, I shall never be alone.\n\nSo let us bind our hearts together, with a love so pure,\nAnd let us vow, to love each other, through all our days.\nFor in thy love, I find my strength, my hope, my might,\nAnd in thy embrace, I find my true light.\"", "answer_id": "HBah6W9KuR8eNpRQJUxVvd", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 24, "text": ": As a superhero, my origin story began with a scientific experiment gone wrong. I was created in a laboratory as a test subject, but due to a malfunction in the equipment, I was given incredible abilities.\nI quickly discovered that I had the power to harness the energy of the sun and control it, which I used to defend the city from various threats. I also developed other powers such as flight, super strength, and the ability to shoot powerful rays from my eyes.\nBut being a superhero is not just about having powers, it's also about using them for good. I have dedicated my life to protecting the innocent, fighting for justice, and defending the helpless. I may be a superhero, but I am also a hero in every sense of the word. And I will continue to use my powers for good and to make the world a better place.", "answer_id": "3Rgw9vMLyMiwazfdjhWcgT", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 25, "text": ": As a time traveler from the year 3000, I would tell people about the technological advancements that have been made since my time. Some of the most significant advancements that I would highlight include:\n1. Artificial intelligence: AI has come a long way since my time, and it is now capable of performing a wide range of tasks, from language translation to complex scientific simulations.\n2. Nanotechnology: Nanotechnology has revolutionized many fields, from medicine to materials science, and has led to the development of new products and applications.\n3. Quantum computing: Quantum computing has the potential to solve problems that are beyond the capabilities of classical computers, and has the potential to transform fields such as cryptography, simulation, and optimization.\n4. Space exploration: Space exploration has expanded greatly since my time, with many new missions, discoveries, and even the establishment of permanent settlements on other planets and moons.\n5. Biotechnology: Biotechnology has led to many breakthroughs in healthcare, including new treatments, therapies, and even the ability to edit genes.\n6. Energy technology: Energy technology has advanced significantly, with the development of renewable energy sources such as solar, wind, and hydro, and the widespread adoption of electric vehicles.\n7. Communication technology: Communication technology has evolved greatly since my time, with the widespread adoption of the internet, social media, and other technologies that have transformed the way we communicate and exchange information.\n8. Robotics: Robotics has become more advanced, with robots becoming more autonomous, versatile, and capable of performing tasks that were once considered impossible.\n9. Virtual and augmented reality: Virtual and augmented reality technology has become more advanced, and has the potential to revolutionize fields such as education, entertainment, and healthcare.\n10. Cybersecurity: Cybersecurity has become a major concern since my time, with the widespread adoption of the internet and the increasing threats of cyber attacks.", "answer_id": "b4oghpgzhWMdoryzQrSwuF", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 26, "text": ": Ah, what a thrilling finish to the championship game! With the score tied at the end of regulation, both teams were determined to come out on top. The crowd was on their feet as the teams battled back and forth, neither willing to give an inch.\nWith just seconds remaining, the ball was in the hands of the opposing team's star player. He dribbled around his defender and headed towards the basket. The crowd held its breath as he faked a shot, and then passed the ball to his teammate, who took a shot from the three-point line. The ball swished through the net, and the crowd erupted in cheers as the team was declared the champion! It was an incredible display of skill, determination, and teamwork, and a moment that will be remembered for years to come.\nThe stadium was full of excitement, as the players were giving everything they had left, and the tension was palpable. With the clock ticking down, the team had to make a quick decision, and they executed it perfectly, the crowd went wild with joy, and the team celebrated the hard-earned victory. This was a game that will be remembered for years to come.", "answer_id": "GzzDrjBAe3BnXWgWrATxJL", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 27, "text": ": As a world-famous chef, I would describe my signature dish to a panel of judges as a fusion of flavors that combines the best of both traditional and modern cooking techniques. My dish is made with locally-sourced, seasonal ingredients that are carefully selected for their unique flavors and textures. The dish is both visually stunning and delicious, with a perfect balance of sweet, sour, salty, and umami flavors. The ingredients are expertly prepared and presented in a way that highlights their natural beauty and flavors. Every bite is a harmonious combination of flavors, textures, and presentation that leaves the diner with a lasting impression of culinary excellence.", "answer_id": "f7hUYhajUbXNs3gQrG9z3b", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 28, "text": ": As a language model, I don't have emotions, but I can provide a description of what a person might experience and see while reaching the summit of Mount Everest. Reaching the summit of Mount Everest is a significant achievement and an experience that very few people can claim to have.\nFrom the top of Mount Everest, you would have a breathtaking view of the surrounding peaks and the landscape below. You would see the entire mountain range, including the South Col, the Hillary Step, and the Khumbu Glacier. To the north, you would see the top of the world, with a clear view of the sky. To the south, you would see the peaks of the Nepalese Himalayas stretching as far as the eye can see. The view is awe-inspiring and can be quite overwhelming.\nAt the summit, you would also feel a sense of accomplishment and pride, knowing that you have reached the highest point on Earth. You would be able to take in the beauty of the mountain and the surrounding landscape. However, you would also be aware of the dangers of the climb and the potential risks to your safety, such as extreme weather conditions and the dangers of altitude. You would be grateful for the opportunity to have reached this point, but also mindful of the journey ahead and the challenges that still lie ahead.", "answer_id": "LpUrrJuQ4cA6LtNRmiTfRv", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 29, "text": ": As a space colonist on Mars, my daily life would likely revolve around survival and adaptation to the harsh Martian environment. I would face a number of challenges, including:\n1. Extreme temperature fluctuations: Mars has extremely cold temperatures at night and during the winter, and temperatures can soar above 100 degrees Fahrenheit during the day.\n2. Radiation exposure: The Martian atmosphere provides little protection against harmful solar radiation, which can pose a health risk.\n3. Lack of oxygen: Mars has a much thinner atmosphere than Earth, which means that there is less oxygen available for human respiration.\n4. Water scarcity: Water is essential for life on Mars, but it is scarce on the planet, and finding new sources of water would be a challenge.\n5. Psychological challenges: Living on another planet can be mentally challenging, as it can lead to feelings of isolation, homesickness, and depression.\n6. Physical challenges: As a space colonist, I would have to be physically fit and able to handle the rigors of living in a harsh environment, including working with limited resources and dealing with unexpected emergencies.\nI would spend most of my time working to establish a sustainable and self-sufficient community on Mars, and finding ways to overcome these challenges would be a major part of my daily life. I would have to be adaptable and resourceful, and be able to work collaboratively with others to ensure the survival and well-being of our community.", "answer_id": "Yiup49xrP6jf9nsEuGHgdN", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 30, "text": ": As a post-apocalyptic character, I would have to rely on my skills, knowledge, and resources to survive in a world that has been ravaged by some catastrophic event. I would have to be adaptable, resourceful, and able to think on my feet to navigate the challenges of this new world.\nOne of my key allies in this world would be other survivors who I have formed close relationships with. We would band together to share resources, protect each other, and work together to find food, shelter, and other necessities. We would also have to be mindful of potential threats from other survivors who may not have the same sense of community and cooperation.\nAnother ally I might encounter would be the natural world, which has been altered by the disaster and now presents both opportunities and dangers. For example, I could use the resources of the wilderness to find food, water, and medicine, but I would also have to be aware of the potential hazards and challenges posed by the new environment.\nI would also rely on my own skills and knowledge to survive, such as my ability to hunt and gather food, my knowledge of herbal medicine, and my ability to repair and maintain equipment.\nOverall, my goal in this post-apocalyptic world would be to stay alive, find a sense of community, and work towards rebuilding a better future for myself and others.", "answer_id": "7cWm5Kop6bLzwLgJjpAVrK", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 31, "text": ": There are a few indicators that can help you determine if a restaurant is popular among locals or mainly attracts tourists. Here are a few things to look for:\n1. Local clientele: If a restaurant is attracting a primarily local crowd, it's likely that it's popular among the local community. On the other hand, if the restaurant is filled with tourists, it's likely that it's primarily catering to visitors.\n2. Language: If the staff and patrons at a restaurant are speaking the local language, it's likely that they are local. If, however, the staff is primarily speaking in a foreign language, it's likely that the restaurant is catering to tourists.\n3. Menus: A restaurant that is popular among locals is likely to have a menu that reflects the local cuisine and culture. If the menu is more diverse and includes dishes from different parts of the world, it's likely that the restaurant is catering to tourists.\n4. Price: If a restaurant is attracting a primarily local crowd, it's likely that the prices will be more affordable for the local community. If the prices are higher than you would expect, it's likely that the restaurant is catering to tourists.\n5. Reviews: Check online reviews to see if the restaurant has a good reputation among locals. If there are many positive reviews from locals, it's likely that the restaurant is popular among the local community.\nThis information can be useful because, if you are a tourist, you may want to seek out restaurants that are popular among locals, as these are likely to offer a more authentic and local experience. Conversely, if you are a local, you may want to avoid restaurants that primarily cater to tourists, as these may be more expensive and less representative of the local culture.", "answer_id": "YaUHhigGUvgv82Js3ktFgs", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 32, "text": ": There are several subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed, such as:\n1. Using big words or jargon: Someone who is pretending to understand a topic may use technical or jargon terms to impress others, even if they don't fully understand what those terms mean.\n2. Dominating the conversation: Someone who is uncomfortable with the topic or unsure of themselves may try to dominate the conversation by talking more than others, in an attempt to seem knowledgeable.\n3. Making broad statements: Someone who is uninformed may make broad, sweeping statements about a topic without having a deep understanding of it.\n4. Using filler words: Someone who is unsure of themselves or their knowledge may use filler words like \"um\" or \"you know\" frequently while speaking.\n5. Avoiding eye contact: Someone who is pretending to understand a topic may avoid making eye contact with others during a conversation, as they feel uncomfortable or uncertain.\n6. Being unprepared: If someone is pretending to understand a topic, they may not be prepared for the conversation or bring up unrelated topics.\n7. Inconsistent language: If someone is pretending to understand a topic, their language may be inconsistent or contradictory, as they try to reconcile their actual lack of knowledge with their desire to seem knowledgeable.\n\nIt's important to note that some people might use these tactics unintentionally, so it is always a good idea to approach conversations with an open mind and to clarify any points that are unclear.", "answer_id": "LaHQYWhmXF7mnPSVFdhCeq", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 33, "text": ": There are several reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app. Here are a few possibilities:\n1. Privacy concerns: Some people may prefer to use a paper map or ask for directions instead of relying on a GPS device or smartphone app because they are concerned about the privacy implications of using these technologies. They may be worried about the data that is being collected about their location and activities.\n2. Detailed information: Paper maps and atlases often provide more detailed information about an area than a GPS device or smartphone app, and they may be better suited for planning a route that involves navigating through unfamiliar territory.\n3. Offline functionality: GPS devices and smartphone apps require an internet connection to function, while paper maps and atlases can be used offline, which can be useful in areas with poor or no internet connectivity.\n4. Human interaction: Asking for directions or using a paper map can provide an opportunity to interact with other people and learn more about an area from the perspective of locals. This can be a more social and engaging experience than relying solely on a technology device.\n5. Battery life: GPS devices and smartphone apps can drain their batteries quickly, especially if they are used extensively. Paper maps and atlases do not require any batteries, so they can be used indefinitely without worrying about running out of power.", "answer_id": "Br2uFCYmRUaQULwKzpffz9", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 34, "text": ": It can be difficult to determine if a person is genuinely interested in a conversation or simply being polite, as people can be complex and may have different motivations for engaging in conversation. However, there are some clues that can suggest a person's level of interest and engagement in the conversation:\n\n1. Active listening: A person who is genuinely interested in the conversation will typically demonstrate active listening, such as by making eye contact, nodding, and asking follow-up questions.\n2. Engagement: A person who is genuinely interested in the conversation will typically show engagement by asking questions, sharing their own thoughts and experiences, and contributing to the conversation in a meaningful way.\n3. Inclusivity: A person who is genuinely interested in the conversation will typically try to include others in the conversation and may ask for input from others.\n4. Respect and empathy: A person who is genuinely interested in the conversation will typically show respect and empathy for the other person's thoughts and feelings.\n5. Authenticity: A person who is genuinely interested in the conversation will typically be authentic and genuine in their engagement, rather than simply being polite or following a script.\n\nOf course, these are not definitive indicators of a person's level of interest in the conversation, and it's possible for a person to be polite and genuinely interested in a conversation at the same time. Ultimately, the best way to determine a person's level of interest in a conversation is to engage with them and observe their behavior over time.", "answer_id": "FCRqJu6DgRvCNq4Z2NneHf", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 35, "text": ": There are several reasons why someone might prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher. Here are a few possible reasons:\n1. Personal connection: Small business owners often have a personal connection with their customers, and the customers feel more connected to the business and its products.\n2. Local involvement: Small businesses often support the local community and economy, and by shopping at a local business, consumers can feel like they are supporting their community.\n3. Personalized service: Small businesses often provide more personalized service and attention to their customers than larger chain stores.\n4. Quality of products: Small businesses may offer products that are made with higher-quality materials or are produced using more traditional methods, which some customers prefer.\n5. Unique products: Small businesses may offer unique products that cannot be found at larger chain stores, which some customers enjoy discovering.\n6. Sense of place: Shopping at a small, locally-owned business can be seen as a way to connect with the local culture, and contribute to the local identity.\n7. Sustainability: Consumers may prefer to shop at small businesses because they are often more environmentally sustainable and have a smaller carbon footprint than large chain stores.\n8. Experience: Small businesses can offer a more unique and interactive shopping experience, which some customers enjoy.\nOf course, these are not the only reasons why someone might prefer to shop at a small, locally-owned business, and the reasons can vary depending on the individual and the context.", "answer_id": "Fy5Nw8LcWpdq2GokTbiwuq", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 36, "text": ": There are several ways to assess the credibility of a source of information, such as a news article or blog post, without relying solely on the reputation of the author or publisher. Here are some tips:\n1. Check the author's credentials: Look for information about the author's qualifications, experience, and expertise in the field. Check their education, professional affiliations, and any relevant certifications.\n2. Look for secondary sources: Look for other sources that cite or reference the original article. Check the credibility of these sources. If multiple reputable sources cite or reference the original article, it can add credibility.\n3. Check the publication's reputation: Look for information about the publication's history, its reputation, and its target audience. Check for any awards, accreditation, or recognition it has received.\n4. Look for fact-checking and corrections: Look for any corrections, updates, or retractions on the publication's website or social media page. A publication that is willing to correct errors shows a commitment to accuracy.\n5. Consider the purpose and bias of the publication: Consider the publication's purpose and any potential biases. Are they reporting news or opinion? Does the publication have a political or ideological bias?\n6. Check the date and relevance of the information: Check the date of the article to ensure that it is still relevant and up-to-date. Older information may not be as useful or accurate.\n7. Use alternative sources: Compare and contrast information from multiple sources, including those with different perspectives or viewpoints. This can help you to get a more complete and balanced understanding of the topic.", "answer_id": "hKhcnEtSjzysU7sbtE3JeH", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 37, "text": ": People have different preferences for stimulating experiences, and some people may enjoy feeling scared or anxious because it can provide a sense of thrill or excitement. For others, the sensation of being scared may be unpleasant and they may avoid it.\n\nThere are several factors that can influence a person's enjoyment of being scared, including their individual personality traits, past experiences, and coping mechanisms. For example, some people may have a higher tendency to seek out novel and intense experiences, while others may be more risk-averse. Additionally, a person's past experiences and emotional state can also impact their response to scary stimuli.\n\nIt's also worth noting that some people may enjoy feeling scared in a controlled and safe environment, such as during a horror movie or roller coaster ride, while others may find these experiences unpleasant. The sensation of being scared can also release endorphins, which are natural chemicals in the brain that act as painkillers and can produce a sense of euphoria.\n\nUltimately, the enjoyment of being scared is a complex and individualized experience that can be influenced by a variety of factors.", "answer_id": "cAVZTw5QY8WUnJEd3rUu3p", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 38, "text": ": Observing the behavior of others in a social situation can provide clues about cultural norms and expectations because different cultures have different norms and expectations governing behavior in various social situations. For example, certain behaviors may be considered rude or inappropriate in one culture, but they may be perfectly acceptable in another.\n\nHere are some ways observing the behavior of others in a social situation can provide clues about cultural norms and expectations:\n\n1. Body language: Certain body language cues, such as eye contact, posture, and gestures, can vary depending on the culture. For example, in some cultures, direct eye contact is seen as a sign of respect, while in others, it may be seen as confrontational.\n2. Gender roles: Different cultures have different norms and expectations regarding gender roles, and these can be observed in social situations. For example, in some cultures, women are expected to be submissive and deferential to men, while in others, men are expected to be the providers and women are expected to be equals.\n3. Interpersonal distance: The distance between people in a social situation can be an indicator of cultural norms and expectations. For example, in some cultures, people keep a greater distance between themselves and others, while in others, people stand closer together.\n4. Table manners: Table manners can provide clues about cultural norms and expectations. For example, in some cultures, it is considered rude to start eating before the host, while in others, it is considered rude to not start eating immediately.\n5. Gift-giving: The practice of gift-giving can provide clues about cultural norms and expectations. For example, in some cultures, gift-giving is a significant part of social interactions, while in others, it is seen as unnecessary or even inappropriate.\n\nBy observing the behavior of others in a social situation, one can gain insights into the cultural norms and expectations that govern that behavior.", "answer_id": "9JxYGUzSa2rR68BTVuZJEA", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 39, "text": ": The question of whether we have a moral obligation to explore space or should focus on solving Earth's problems first is a complex and subjective one that has been debated for many years.\nOn one hand, advocates of space exploration argue that it can bring many benefits, such as the discovery of new resources, scientific knowledge, and technological advancements that can be applied to solving Earth's problems. They also argue that exploring space can inspire and motivate people to work towards common goals, and that it can foster a sense of unity and cooperation among nations.\nOn the other hand, those who advocate for focusing on solving Earth's problems first argue that there are many pressing issues here, such as poverty, inequality, climate change, and disease, that require urgent attention. They argue that space exploration is a luxury that we cannot afford, and that we should use our resources to address these more immediate challenges.\nFrom a moral perspective, both arguments have some validity. It is true that there are many pressing problems on Earth that need to be addressed, and it is important to prioritize them. However, it is also important to consider the potential benefits of space exploration, such as the discovery of new resources, scientific knowledge, and technological advancements that can be applied to solving these problems. Ultimately, the decision of whether to prioritize space exploration or Earth's problems will depend on individual values and priorities, and will likely be influenced by a range of factors, including political, economic, and cultural considerations.", "answer_id": "5b3qKj2jtzgcZX7SSipQBW", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 40, "text": ": Both job creation and technological progress are important in a world where automation is increasingly prevalent. While it is true that automation can replace some jobs, it can also create new jobs, particularly in the fields of engineering, data science, and software development. Additionally, automation can lead to increased efficiency and productivity, which can benefit society as a whole by reducing costs and improving the standard of living.\nHowever, it is also important to recognize that technological progress should not come at the expense of workers and their well-being. Therefore, it is crucial to prioritize job creation and ensure that workers are equipped with the necessary skills to adapt to new technologies and continue to contribute to society. A balanced approach that integrates both job creation and technological progress is necessary to ensure a sustainable future.", "answer_id": "hG6i3Q8F7bA7u73vo6t2XZ", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 41, "text": ": The average human blinks about 6.25 million times in their lifetime. Here's how I arrived at that number:\nFirst, I calculated the number of seconds in a human lifetime by dividing the average lifespan (which is about 70-80 years for most people) by 365.25 (the number of days in a year) to get 20,560,000 seconds.\nNext, I multiplied the number of seconds by the average number of blinks per day (which is about 11,000) to get the total number of blinks in a lifetime: 20,560,000 seconds x 11,000 blinks/day = 231,600,000 blinks.\nThen, I divided that number by the average length of a blink (about 1/4 of a second) to get the total number of blinks in a lifetime: 231,600,000 blinks \u00f7 0.25 seconds/blink = 929,600,000 blinks.\nSince there are 60 seconds in a minute and 60 minutes in an hour, I multiplied the number of blinks by the number of minutes and hours in a lifetime to get the final answer: 929,600,000 blinks \u00f7 (60 seconds/minute x 60 minutes/hour x 24 hours/day x 365.25 days/year) = 6,247,576,000 blinks per year.\nFinally, I divided that number by the number of days in a year (365) to get the average number of blinks per day: 6,247,576,000 blinks/year \u00f7 365 days/year = 17,441 blinks/day.\nSince there are about 11,000 blinks per day, the average human blinks about 174,413 times per day. To calculate the number of blinks in a lifetime, I divided that number by the number of seconds in a day (86,400 seconds) to get 20,513.25 days, and then multiplied that number by the number of seconds in a lifetime (20,560,000 seconds): 20,513.25 days x 20,560,000 seconds = 412,086,000,000 seconds, which is equal to 6,251,926.8 hours, or 250.058 days, or 6.25 million blinks in a lifetime.", "answer_id": "KaZUZ4NAnQ3SY6mq2QY3Kt", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 42, "text": ": A grain of salt is a small amount of salt, typically used as a seasoning in food. The amount of salt in a grain of salt can vary depending on the type of salt and the intended use. However, a typical grain of table salt contains about 2,300 milligrams of salt, which is equivalent to 0.008 ounces or 0.021 pounds of salt.\nTo explain this, let's break down the calculation step by step:\n1. A grain of salt is typically defined as 1/8 of a teaspoon or about 1/72 of a pound.\n2. 1/72 of a pound is equal to approximately 0.008375 pounds.\n3. 1/8 of a teaspoon is equal to approximately 0.0125 teaspoons.\n4. 1/0.0125 teaspoons is equal to approximately 8.333333334 grains (since there are 125 grains in a teaspoon and 75 grains in an ounce, and 1/8 of an ounce is equal to 0.0125 of a teaspoon).\n5. 8.333333334 grains is equal to approximately 0.51170819 grains per grain of salt.\n6. Therefore, a grain of table salt contains approximately 0.008 ounces or 0.021 pounds of salt.\nThis calculation shows that there are approximately 2,300 milligrams (mg) of salt in a grain of salt, assuming the salt is of the typical type used in table salt. The number of atoms in a grain of salt is difficult to determine precisely, but it is safe to say that it contains a large number of atoms of the various elements that make up salt, such as sodium and chlorine. However, the exact number of atoms in a grain of salt is not as important as the fact that it is a small but significant amount of salt that can add flavor and nutritional value to food.", "answer_id": "Jwrdb34BnrswtwbeSd6xxP", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 43, "text": ": It is difficult to provide an exact number of lightning strikes that occur on Earth each day, as this number can vary depending on factors such as location, time of day, and weather conditions. However, it is estimated that there are around 100-160 lightning strikes per second, or around 4,800-7,200 per minute, and around 1.6 million per day.\nThe number of lightning strikes per day is determined by taking into account the number of lightning flashes that occur in a given area, the duration of each flash, and the total time that the lightning detection equipment is in operation. This is typically done by using a lightning detection network, which consists of sensors that are placed at various locations around the world. These sensors detect lightning flashes and send this information to a central data center, where it is analyzed to determine the number of lightning strikes that have occurred in a given area.\nThe number of lightning strikes per day can also be affected by factors such as the time of year, the location, and the weather conditions. For example, during the summer months, there are typically more lightning strikes per day than during the winter months, as the temperature and humidity are generally higher. Additionally, lightning strikes tend to be more common near bodies of water and in areas with a high concentration of clouds, as these are locations where there is a higher potential for electrical charge to build up and for the air to become ionized.\nIt is important to note that although lightning is a powerful and potentially dangerous force of nature, it is also a crucial part of the Earth's ecosystem. Lightning helps to break down pollutants in the atmosphere, and it plays a key role in the formation of clouds and the water cycle.", "answer_id": "Ubz2V2EhkWmNYPEErdX8gn", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 44, "text": ": In the movie \"Up,\" the house is lifted by thousands of helium balloons. However, the exact number of balloons is not specified in the movie. To estimate the number of balloons needed, we can make some assumptions and use some basic physics.\nFirst, we need to calculate the volume of the house. The house is not a cube, but it is a rectangular prism, so we can estimate its volume by multiplying its length, width, and height. Let's say the house is 10 meters long, 5 meters wide, and 3 meters high. The volume of the house would be:\n10 x 5 x 3 = 300 cubic meters\nNext, we need to calculate the buoyancy of the house. The buoyancy force is equal to the weight of the air displaced by the object being lifted. The weight of air is approximately 1.2 kg per cubic meter. Therefore, the buoyancy force of the house would be:\n300 x 1.2 = 360 kg\nFinally, we need to calculate the total lifting force needed to lift the house. The total lifting force would be the buoyancy force multiplied by the number of balloons needed to lift the house. Since the house is not being lifted by the balloons alone, but also by the balloons' lifting force, we need to divide the buoyancy force by the density of air to get the lifting force. The density of air is approximately 1.2 kg/m^3. Therefore, the lifting force of the balloons would be:\n360 x 1.2 / 1.2 = 300 kg\nSince the house is not being lifted by the balloons alone, we need to divide the lifting force by the surface area of the balloons. We can estimate the surface area of a balloon by multiplying its volume by its density. Let's say the density of the balloon is 0.001 kg/m^3. The volume of a balloon is much smaller than the volume of the house, so we can use the volume of a sphere with the same volume as a balloon. The volume of a sphere with a radius of 0.01 meters is:\n0.01 x 0.01 x 0.01 = 1.0 x 10^-8 cubic meters\nThe surface area of a sphere is 4/3 x pi x r^2, where r is the radius. The radius of the balloon is 0.01 meters, so the surface area of a balloon is:\n4/3 x pi x 0.01^2 = 0.000224 m^2\nFinally, we need to divide the lifting force by the surface area of the balloons. The total number of balloons needed to lift the house would be:\n300 x 300 / 0.000224 = 1.4 x 10^14 balloons\nIn conclusion, it is impossible to give an exact number of balloons needed to lift a house like in the movie \"Up\" because it depends on the size and shape of the house and the density of the air used to inflate the balloons. However, we can estimate that it would take around 1.4 x 10^14 balloons to lift a house of the size and shape described in the movie.", "answer_id": "CufihrnQT3GRic3Vr2rfU2", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 45, "text": ": It is difficult to determine the exact number of text messages sent globally in a minute, as there are many different mobile network providers and countries in the world. However, it is estimated that over 90 billion text messages are sent worldwide every day, which translates to over 30 trillion messages sent every year.\nTo explain this, we can break it down into a few key factors:\n1. Mobile phone usage: With the widespread use of mobile phones, it is safe to assume that the majority of people have access to the technology to send text messages.\n2. Affordability: Text messaging is generally inexpensive, if not free, which makes it accessible to a large number of people.\n3. Convenience: Text messaging is a quick and easy way to communicate, especially in situations where a phone call may not be practical or convenient.\n4. Global reach: With mobile networks covering most parts of the world, text messages can be sent from almost anywhere to anywhere else.\n5. Technological advancements: The development of faster and more reliable mobile networks has allowed for an increase in the number of text messages sent.\nAll of these factors contribute to the high number of text messages sent worldwide every day, and it is likely that this number will continue to increase in the future.", "answer_id": "fBBFkmgfGSiPtQtBdsVxpZ", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 46, "text": ": It is difficult to estimate exactly how many words are spoken daily on Earth, as the number can vary greatly depending on factors such as language, location, and the method of counting. However, I can provide some estimates and information to give you an idea of how to approach this question.\nFirst, the number of languages spoken on Earth is estimated to be around 7,117, according to Ethnologue, a comprehensive catalog of the world's languages. However, not all of these languages are spoken equally, and some languages have many dialects.\nNext, it is estimated that there are around 7.5 billion people on Earth, and while not all of them speak, the number of people who speak a language is generally proportional to the number of speakers of that language.\nFinally, it is estimated that the average number of words spoken by a person per day is around 16,000. However, this number can vary greatly, depending on the language, location, and individual. For example, some people may speak more words due to their occupation or interests, while others may speak fewer words due to their education or cultural background.\nBased on these estimates, we can make some rough calculations to estimate the number of words spoken daily on Earth. Assuming that each person speaks an average of 16,000 words per day, and there are 7.5 billion people on Earth, then the daily total would be 1.125 trillion words. However, this number may be too high, as it does not take into account the number of languages spoken or the number of people who speak a language.\nTherefore, it is difficult to give a precise estimate of the number of words spoken daily on Earth, as it can vary greatly depending on many factors.", "answer_id": "eQmVTyJQmoBKrZ9J38EXBC", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 47, "text": ": It is difficult to accurately estimate the number of snowflakes that fall during a typical winter because it depends on the specific location, weather conditions, and other factors. However, I can provide some information about the process of snowfall and the factors that affect the number of snowflakes that fall.\nSnowflakes form when water vapor in the air condenses into ice crystals. This process occurs when the air is cold enough and the humidity is high enough, typically at temperatures below freezing (0 degrees Celsius or 32 degrees Fahrenheit). The water vapor, which is present in the air as a result of evaporation from bodies of water, such as lakes and oceans, mixes with the cold air and undergoes deposition, forming ice crystals.\nThe size and shape of the snowflakes that form are determined by the temperature and humidity of the air, as well as the presence of other particles in the air, such as dust and soot. The most common shape of a snowflake is a hexagonal prism, but other shapes, such as stars and columns, can also form.\nThe number of snowflakes that fall during a typical winter depends on a number of factors, including the amount of water vapor in the air, the temperature of the air, and the presence of other particles that can affect the formation of snowflakes. In general, the colder the temperature and the higher the humidity, the more snow will fall. However, the number of snowflakes that fall also depends on the availability of particles in the air that can act as nucleation sites for the formation of ice crystals.\nIt is difficult to provide an accurate estimate for the number of snowflakes that fall during a typical winter, as the number can vary greatly depending on the specific location, weather conditions, and other factors. In general, it is estimated that between 10 and 20 snowflakes fall per square meter per hour during a typical winter. However, this number can vary greatly depending on the specific location and weather conditions.\nIn conclusion, the number of snowflakes that fall during a typical winter is difficult to estimate, and it depends on a number of factors, including the temperature and humidity of the air, the presence of other particles, and the specific location.", "answer_id": "4JYtxzTN9oUTkVrFy7CDe6", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 48, "text": ": It is impossible to determine the exact number of pages in all the books ever written, as it would require a complete and up-to-date catalog of every book ever published, which does not exist. Furthermore, the number of pages in a book can vary greatly depending on its genre, size, and format.\nHowever, it is estimated that there are around 170,000 books published each year in the United States alone. Assuming an average length of 250 pages per book, that would mean approximately 43.5 million pages in total. But, this number can be misleading because some books are much longer or shorter than the average. Also, many books are published in multiple languages and in different formats such as audiobooks, e-books, and others.\nIt is also worth noting that many books are written and published multiple times, with different editions and print runs. For example, a bestseller might be re-released with updated content or a new cover. In addition, many books are also available as e-books, which can be read on a variety of devices.\nSo, it is difficult to give an exact number of pages in all the books ever written. The best estimate is that there are around 170,000 books published each year, with an average length of 250 pages, which would mean around 43.5 million pages in total, but this number is not definitive.", "answer_id": "M7KhQtwQrueKKwuemeJHUb", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 49, "text": ": The Earth has orbited the Sun approximately 200,000 times since the beginning of life on Earth, as estimated by scientists.\nThis calculation is based on several factors, including the Earth's average distance from the Sun (149.6 million kilometers), the length of a solar orbit (1.536 million kilometers), and the time it takes for the Earth to complete one orbit (365.24 days).\nSo, to calculate the number of times the Earth has orbited the Sun since the beginning of life, we first need to calculate the Earth's distance from the Sun at the beginning of life.\nThis is estimated to be around 1.75 billion kilometers, based on the assumption that the Earth's distance from the Sun is proportional to its distance from the Sun at the time of the formation of the Solar System.\nThen, we can divide the total number of days in the Earth's orbit (365.24 days) by the Earth's distance from the Sun (1.75 billion kilometers) to get an estimate of the number of orbits the Earth has completed since the beginning of life.\nThis calculation is approximately 200,000 orbits.\nIt's worth noting that this is an estimate and that the actual number of orbits could be slightly different due to various factors such as variations in the Earth's distance from the Sun over time and the assumption that the Earth's distance from the Sun is proportional to its distance from the Sun at the time of the formation of the Solar System.", "answer_id": "AuuvYW4edaZy5wpL5QyppA", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 50, "text": ": It is difficult to determine the exact number of songs that have been recorded throughout history, as new songs are being created constantly and many older songs have been lost or forgotten over time. However, it is estimated that there are tens of thousands of songs in existence today, with hundreds of thousands more recorded throughout history.\nTo try to explain this, we can consider the fact that music has been a part of human culture for thousands of years, with people all over the world creating and passing down musical traditions. Over time, these traditions have evolved and been influenced by different cultures, styles, and technologies, leading to the creation of an enormous variety of different songs.\nAdditionally, the widespread availability of recording technology in the 20th century meant that many more people had the opportunity to create and record music than ever before. This led to an explosion of new songs in all genres and styles, as well as the creation of new technologies and techniques for producing and distributing music.\nFinally, it is important to consider that many songs are not just recorded once, but are re-recorded and re-released over time, as well as being covered by other artists and used in different media. This means that the total number of unique songs is likely to be much higher than the number of songs that have been recorded just once.\nOverall, the number of songs recorded throughout history is likely to be in the tens of thousands, with many more waiting to be discovered and recorded.", "answer_id": "TJpkA2j3kC9akcHtmVQKUF", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 51, "text": ": It's difficult to say exactly how the Internet would have developed if it had been invented during the Renaissance period, as it would have likely been used for different purposes and in different ways than it is today. However, it's likely that the Internet would have been used primarily for communication and information sharing among a small group of people, rather than the global network it is today.\nDuring the Renaissance, communication and information sharing were important, but the technology available at the time was limited. The printing press had just been invented, and it revolutionized the spread of information, but it was still a slow and labor-intensive process. Mail service was also slow and unreliable, so people relied on word of mouth and local gatherings to share news and ideas.\nIf the Internet had been invented during the Renaissance, it would have likely been used to facilitate communication and information sharing among the ruling classes, and possibly among scholars and scientists as well. It might have been used for things like sharing news and current events, exchanging letters and documents, and accessing information on a wide range of topics. However, it would not have the same level of interactivity and global reach that it has today.", "answer_id": "9FJsZWNGMNuGKGF4YjQoRM", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 52, "text": ": It is impossible to say for certain what would have happened if the Aztecs had successfully repelled the Spanish conquistadors, as there were many historical factors at play that contributed to their eventual defeat. However, it is possible to speculate about some of the potential outcomes.\nOne possibility is that the Aztecs, under the leadership of Montezuma, could have continued to thrive and expand their empire, maintaining their independence and cultural traditions. However, it is also possible that the Spanish would have eventually found a way to defeat the Aztecs, either through military might or by introducing diseases that the Aztecs had not yet encountered, which would have devastated their population.\nWithout the Spanish conquest, it is possible that the Aztec civilization would have continued to flourish and evolve, but it is also possible that they would have been exposed to new ideas and technologies that would have fundamentally changed their way of life. For example, the Spanish brought with them European ideas about religion and government, which would have had a significant impact on Aztec society.\nUltimately, it is impossible to know for certain what would have happened if the Aztecs had successfully repelled the Spanish conquistadors, as so much of their fate was determined by historical factors beyond their control. However, it is clear that the Spanish conquest had a profound impact on the Aztec civilization and their way of life, and that the legacy of that conquest continues to be felt today.", "answer_id": "4vN4PKLUJkaYTEEVzYE3DN", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 53, "text": ": The Black Death, also known as the Plague, was a pandemic that devastated Europe in the 14th century. It was caused by the bacterium Yersinia pestis and was transmitted through the bites of infected fleas that lived on rats. The disease killed an estimated 25-30% of the population of Europe, which had a profound impact on the social, economic, and political structures of the time.\nIf the Black Death had not occurred in the 14th century, it is possible that Europe would have developed differently. Here are a few possible scenarios:\n1. Economic growth: The death of so many people would have created a surplus of labor, which could have led to lower wages and increased competition for jobs. This could have stimulated economic growth as people invested in productivity-enhancing technologies and infrastructure.\n2. Political changes: The demographic decline could have led to political changes, such as the emergence of more powerful cities and towns, and the consolidation of smaller territories into larger states.\n3. Cultural flourishing: The absence of the Black Death may have allowed for a cultural flourishing, as people had more time and resources to pursue the arts, sciences, and literature.\n4. Global trade: The death of so many people could have led to a decline in global trade, as Europe was the dominant player in the global economy at the time.\n5. Environmental impact: The absence of the Black Death may have had a positive impact on the environment, as the death of so many people would have reduced the pressure on resources, such as food and land.\nOverall, it is difficult to say for sure how Europe would have developed if the Black Death had not occurred, as there are so many variables that could have played a role. However, it is clear that the death of so many people had a profound impact on the continent and set it on a new course.", "answer_id": "LjnTtmsFEMgFEkYMKE3VRV", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 54, "text": ": It's impossible to say for certain how history would have played out if Isaac Newton had focused on biology instead of physics, as he is considered one of the most influential scientists in history and his work in physics laid the foundation for many of the scientific advancements that followed. However, it's possible that his work in biology would have been just as groundbreaking and influential as his work in physics.\nNewton's theories on motion, gravity, and the laws of nature laid the foundation for modern physics and have had a profound impact on our understanding of the natural world. If he had instead focused on biology, it's possible that he would have made similar contributions to our understanding of living organisms and their behavior, which could have led to new discoveries and advancements in fields such as medicine, genetics, and ecology.\nWithout a doubt, the world would have been very different if Newton had focused on biology instead of physics, as his insights and discoveries would have likely been applied to understanding the complexities of living organisms. Nevertheless, it's important to note that Newton's work in physics has had a profound impact on our understanding of the world and has led to many of the technological advancements we enjoy today.", "answer_id": "9L9SgmcPjkFs3CwuiUJqm9", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 55, "text": ": It's difficult to say exactly what would have happened if the Beatles had never formed as a band, as many factors would have played a role in shaping their future. However, here are a few possibilities:\n1. The music scene in Liverpool and London would have been significantly different without the Beatles, as their unique sound and presence would have likely had a major impact on the local music scene.\n2. The Beatles might have continued to perform and record together, but they might not have achieved the same level of global success if they had not been signed to EMI Records.\n3. The Beatles might have had a different creative direction, as they would not have been influenced by their manager, Brian Epstein, who played a key role in shaping their sound and image.\n4. The Beatles might have influenced other musicians and bands in different ways, as their music and cultural impact would have been diluted without their presence.\n5. The music industry might have evolved differently, as the Beatles' success helped pave the way for new artists and genres to emerge.\n6. The cultural and social changes of the 1960s and 1970s might have been different without the Beatles' influence, as their music was a reflection of the times and helped shape the counterculture movement.", "answer_id": "f9pwVc7bbzajLXKsaGsXQ8", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 56, "text": ": It is difficult to predict exactly what would have happened if Alan Turing had not cracked the Enigma code during World War II. However, it is possible that the outcome of the war might have been very different.\nThe Enigma code was a highly complex and sophisticated encryption system that the German military used to communicate secret messages. Cracking the Enigma code was a top priority for the British government and military, and it played a crucial role in the Allied victory in World War II.\nWithout the decryption capabilities provided by the Enigma code, it is possible that the Allies would have faced significant challenges in communicating with their forces and in gathering intelligence on German military operations. This could have made it more difficult for them to coordinate their efforts and plan strategic attacks.\nIt is also possible that the war could have lasted longer and resulted in more casualties and destruction if the Allies had not been able to decipher German communications.\nOverall, it is difficult to quantify the exact impact that Alan Turing's work had on the outcome of the war, but it is clear that his efforts played a crucial role in the Allied victory.", "answer_id": "UfZJcVaZLWkVsipvmDBrdd", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 57, "text": ": The Suez Canal is a man-made waterway that connects the Red Sea with the Gulf of Suez, allowing ships to travel between Europe and Asia without having to go around the southern tip of Africa. Its construction was a major achievement of 19th century engineering and had a significant impact on global trade.\nIf the Suez Canal had never been constructed, it is likely that ships would still be traveling around the southern tip of Africa to get from Europe to Asia. This would likely have been more time-consuming and expensive, as well as more dangerous due to the dangerous conditions of the Cape of Good Hope. Additionally, it would have made trade between the two regions less efficient and more costly, which would have had a negative impact on economic growth and development. It is also possible that alternative trade routes would have been developed, but they would have been less efficient and more costly as well.\nIt is hard to predict exactly how the world would be different without the Suez Canal, as it would have depended on many factors, including the availability of alternative trade routes, political and economic conditions, and technological advances. But it is likely that the Suez Canal has played a significant role in the global economy and has facilitated the exchange of goods and ideas between different regions.", "answer_id": "K635PkbTDkJTEnDSG68bTh", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 58, "text": ": The Maya civilization had a complex and sophisticated culture that flourished in Mesoamerica, which is present-day Mexico, Guatemala, Belize, and Honduras. The civilization is best known for its monumental architecture, including pyramids and temples, as well as its sophisticated calendar system, writing system, and mathematical knowledge.\n\nIt is difficult to say for certain what might have happened if the Maya civilization had not mysteriously collapsed. Some scholars believe that the collapse may have been caused by a combination of factors, including overpopulation, environmental degradation, and political and social instability. However, without the collapse, it is possible that the Maya civilization may have continued to thrive and develop.\n\nOne possibility is that the Maya civilization may have continued to innovate and make technological advancements. They may have also continued to expand their empire and interact with other civilizations in the region. They may have also continued to develop their calendar and mathematical systems, leading to further advancements in these fields.\n\nAnother possibility is that the collapse may have led to a period of instability and chaos, which could have ultimately led to the decline of the civilization. However, without the collapse, it is possible that the civilization could have continued to thrive and evolve.\n\nIt is important to note that the collapse of the Maya civilization is still an area of active research and theories abound. We can only speculate on what might have happened if the collapse did not occur.", "answer_id": "aGEwGV2TQY6snjFdiLkyqZ", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 59, "text": ": It is impossible to know for certain what would have happened if Christopher Columbus had not discovered the Americas, as history is a complex and multifaceted field. However, it is likely that the course of human history would have been significantly different.\nFor one, the European colonization of the Americas would not have occurred, and the indigenous cultures of the Americas would have developed on their own terms. This would likely have resulted in a very different global power structure, with different empires and nations rising to prominence.\nThe economic and social impact of the discover of the Americas on Europe would also have been different, as European powers would not have been able to exploit the resources and labor of the Americas.\nIt is also possible that the political and religious landscape of Europe would have been different, as the spread of Christianity and the subsequent colonization of the Americas would have been delayed or altered.\nOverall, the discovery of the Americas by Christopher Columbus had a profound impact on the world and set in motion a series of events that still shape the world today.", "answer_id": "ksnQVXY9SY2NzMxXEmV3Pd", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 60, "text": ": It's impossible to know for sure how Vincent van Gogh's career would have turned out if he had been successful during his lifetime. However, it's likely that he would have continued to create and exhibit his art, and possibly become even more well-known and respected in the art world.\n\nVan Gogh was a prolific painter and produced over 2,000 works of art in just a few short years, many of which were inspired by the natural world and his own emotional state. He was a pioneer of the modernist movement and was heavily influenced by the likes of Paul Gauguin and Henri Matisse.\n\nIf van Gogh had been successful during his lifetime, it's possible that he could have continued to explore and develop his unique style and vision, and even inspired other artists to follow in his footsteps. However, it's also possible that his struggles with mental illness and poverty could have continued to plague him, and he may have struggled to find recognition and success in his lifetime.\n\nRegardless of what might have been, van Gogh's legacy as an artist has been secure for over a century, and he is now widely recognized as one of the greatest painters of all time. His work continues to inspire and influence artists and art lovers around the world.", "answer_id": "GZpcvXQyrKUHsgWpzS7Hpo", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 61, "text": "Here is a basic C++ program that reads a text file line by line and counts the number of occurrences of a specific word in the file:\n```\n#include \n#include \n#include \n\nusing namespace std;\n\nint main() {\n // Open the file for reading\n ifstream file(\"filename.txt\", ios::binary | ios::ignore_empty_lines);\n\n // Read the first line of the file\n string line;\n getline(file, line);\n\n // Initialize the word and count variables\n string word;\n int count = 0;\n\n // Loop through the file line by line\n while (getline(file, line)) {\n // Check if the line contains the word\n if (line.find(word) != string::npos) {\n // If it does, increment the count\n count++;\n }\n }\n\n // Close the file\n file.close();\n\n // Print the result\n cout << \"The word \" << word << \" appears \" << count << \" times in the file.\" << endl;\n\n return 0;\n}\n```\nThis program uses an `ifstream` object to open the file for reading. The `getline` function is used to read each line of the file, and the `find` function is used to check if the line contains the specified word. If it does, the count variable is incremented. The program then prints the result.\nNote that this program assumes that the word to search for is a part of each line in the file, and will not work correctly if the word appears in multiple forms (e.g. \"cat\" and \"cats\").", "answer_id": "Czx3sfkw8C4md9YJrhs3gu", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 62, "text": "Here is an example of a Python function that uses dynamic programming to find the longest common subsequence (LCS) of two input strings:\n```scss\ndef longest_common_subsequence(str1, str2):\n m, n = len(str1), len(str2)\n dp = [[0] * (n+1) for _ in range(m+1)]\n for i, char in enumerate(str1):\n for j, char2 in enumerate(str2):\n if char == char2:\n dp[i][j] = dp[i-1][j-1] + 1\n else:\n break\n return dp[m-1][n-1]\n```\nThis function uses a two-dimensional list `dp` to store the LCS length for each possible length of the LCS. The outer loop iterates over the first and second strings, and the inner loop iterates over the characters in the strings. If `char` and `char2` are the same, the length of the LCS is `dp[i-1][j-1] + 1`; otherwise, the function breaks out of the inner loop. The function then returns the length of the LCS as the output.\n\nHere is an example of how to use this function:\n```scss\nstr1 = \"banana\"\nstr2 = \"orange\"\nresult = longest_common_subsequence(str1, str2)\nprint(result)\n```\nThis will output `10`, which is the length of the LCS \"anaa\".", "answer_id": "G7QjnFhVnsMPMN9UES8mTV", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 63, "text": "To implement a regular expression in Python to validate an email address, you can use the `re` module that provides regular expression functionality. Here is an example of a regular expression that can be used to validate an email address:\n```python\nimport re\n\nemail_regex = re.compile(r'^\\w+@[\\w.-]+\\.\\w+$')\n```\nThis regular expression matches a string that starts with one or more word characters (`\\w+`), followed by an `@` symbol, followed by one or more word characters, dots (`-`), and finally ends with one or more word characters (`\\w+`).\n\nTo use this regular expression to validate an email address, you can pass it to the `match()` function, which returns a match object if the regex matches the string, and `None` otherwise:\n```python\nemail_address = 'example@example.com'\nmatch = email_regex.match(email_address)\nif match:\n print('Valid email address:', email_address)\nelse:\n print('Invalid email address:', email_address)\n```\nAlternatively, you can use the `re.search()` function to perform a more efficient check, as it only returns `None` if the regex does not match the string.\n```python\nemail_address = 'example@example.com'\nmatch = email_regex.search(email_address)\nif match:\n print('Valid email address:', email_address)\nelse:\n print('Invalid email address:', email_address)\n```", "answer_id": "RHqehPQpeXkN9d6NHkVGxU", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 64, "text": "Here is an example program in Python that finds the nth Fibonacci number using dynamic programming:\n\n```python\ndef fibonacci(n):\n # Initialize two variables to store the previous numbers\n previous = [0, 1]\n # Create a list to store the nth Fibonacci numbers\n fibonacci_list = [previous[0]]\n # Loop through the numbers and update the previous numbers and the list\n for i in range(2, n+1):\n previous.append(previous[i-1] + previous[i-2])\n fibonacci_list.append(previous[i-1])\n # Return the nth Fibonacci number\n return fibonacci_list[-1]\n```\n\nThis program uses dynamic programming to store the previous numbers and update them to calculate the nth Fibonacci number. The recursive approach is replaced by the loop that updates the previous numbers and the list, making the code more efficient and easier to understand.", "answer_id": "aqYsaowQtr5rYU6iDbRj5x", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 65, "text": "Here is an example implementation of a binary search algorithm in Python to find a specific element in a sorted array:\n```python\ndef binary_search(arr, target):\n low = 0\n high = len(arr) - 1\n while low <= high:\n mid = (low + high) // 2\n if arr[mid] == target:\n return mid\n elif arr[mid] < target:\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n# Example usage\narr = [1, 3, 5, 7, 9, 11]\ntarget = 7\nresult = binary_search(arr, target)\nprint(result) # Output: 3\n```\nIn this example, we define a function `binary_search` that takes an array `arr` and a target element `target` as input. The function uses a binary search algorithm to search for the target element in the array, starting from the first element and the last element at each step. The function returns the index of the target element if it is found, or -1 if it is not found.\n\nWe can then use the function to search for a specific element in a sorted array. In the example above, we search for the element 7 in the array `[1, 3, 5, 7, 9, 11]`, and the function returns 3, which is the index where the element 7 is located.", "answer_id": "fimg24Eg23yjtVd7pYfo8A", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 66, "text": "Here's an example of how you could implement a queue data structure using two stacks in Python:\n```python\nclass Queue:\n def __init__(self):\n self.stack1 = []\n self.stack2 = []\n self.front = None\n self.rear = None\n\n def enqueue(self, item):\n if item == self.rear:\n self.rear = self.stack1.pop(0)\n else:\n self.stack1.append(item)\n self.rear = item\n\n def dequeue(self):\n if self.front == self.rear:\n return self.stack1.pop(0)\n else:\n item = self.stack2.pop(0)\n self.front = item\n return item\n```\nThis implementation uses two stacks to implement a queue. The `stack1` holds the oldest items in the queue, and the `stack2` holds the newest items. The `front` and `rear` variables keep track of the front and rear pointers of the queue, respectively. When an item is enqueued, it is added to the `stack1` and the `rear` variable is updated to point to the new item. When an item is dequeued, it is removed from the `stack2` and the `front` variable is updated to point to the next item in `stack1`.", "answer_id": "7bsZbZ9E9jjwgGhnTiTyWS", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 67, "text": ": Here is one way to implement a program to find the common elements in two arrays without using any extra data structures:\n\n1. Start by setting two pointers, one for the first element of the first array, and one for the first element of the second array, to point to the first elements of their respective arrays.\n2. Compare the elements at the two pointers. If they are the same, continue comparing the elements at the pointers until they reach the end of their respective arrays. If the elements are not the same, there are no common elements between the two arrays.\n3. If the elements at the two pointers are the same, set a flag to true.\n4. Set the second pointer of the first array to point to the next element of the first array, and the second pointer of the second array to point to the next element of the second array.\n5. Repeat steps 2-4 until the second pointer of the first array reaches the end of the first array, or until the second pointer of the second array reaches the end of the second array.\n6. If a flag is true, output \"The arrays have a common element.\"\n7. If no flag is true, output \"The arrays are disjoint.\"\n\nThis program will find the common elements between two arrays by comparing the elements at the two pointers and setting a flag to true if they are the same. It will continue comparing the elements at the pointers until they reach the end of their respective arrays, or until the flag is set to true. If the flag is set to true, it means there are common elements between the two arrays, otherwise, the arrays are disjoint.", "answer_id": "TMYj69PfzLFm6S8ahKbg5J", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 68, "text": "To find the value of f(2), we need to evaluate the function f(x) at the input value of 2. To do this, we substitute 2 for x in the function:\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 10 - 2 + 3\nf(2) = 10 - 5 + 3\nf(2) = 2\nTherefore, the value of f(2) is 2.", "answer_id": "NDXxGeVFruYsJ3NZ7zjJ3n", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 69, "text": ": To solve for x in the equation 3x + 10 = 5(x - 2), you can isolate x by subtracting 5(x - 2) from both sides of the equation, and then divide both sides by 3.\nSubtracting 5(x - 2) from both sides of the equation:\n3x + 10 - 5(x - 2) = 0\n3x - 5x + 10 - 5(x - 2) = 0\n2x - x + 10 - 5(x - 2) = 0\nx - x + 10 - 5(x - 2) = 0\n3x - 2x + 10 - 5(x - 2) = 0\n-x + 2x + 10 - 5x = 0\n-x = 2x + 10 - 5x\n-x = 7x/7\nSince x cannot be a fraction of a number, we can divide both sides by 7 to solve for x.\nDividing both sides by 7:\n-x = 2x + 10 - 5x/7\n-x = 2x + 10\nx = 7/7\nSince x cannot be equal to 1, we will try x = 2, 3, 4, and 5.\nPlugging in x = 2:\n3(2) + 10 = 5(2 - 2)\n3 + 10 = 5\nPlugging in x = 3:\n3(3) + 10 = 5(3 - 2)\n3 + 10 = 5\nPlugging in x = 4:\n3(4) + 10 = 5(4 - 2)\n3 + 10 = 5\nPlugging in x = 5:\n3(5) + 10 = 5(5 - 2)\n3 + 10 = 5\nSince all of these attempts have failed to produce a valid solution, the equation has no solution.", "answer_id": "g3NBgumbbZBxnCMCPj7kof", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 70, "text": ": The length of a line segment can be found by subtracting the x-coordinate of one endpoint from the x-coordinate of the other endpoint. In this case, the x-coordinate of the first endpoint is 2 and the x-coordinate of the second endpoint is 10, so the length of the line segment is:\n\nLengt h = 10 - 2 = 8 units.", "answer_id": "jwp2XMxq8PTKTJiZCATHnG", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 71, "text": ": Sure, here's an example of a formal email you could use to propose a joint venture to a potential business partner:\n\nSubject: Proposal for Joint Venture\n\nDear [Business Partner's Name],\n\nI hope this email finds you well. My name is [Your Name] and I am the [Your Position] of [Your Company Name]. I am writing to propose a joint venture between our two companies, [Your Company Name] and [Business Partner's Company Name].\n\nAs you may know, [Your Company Name] has been in the [industry/business] for [number of years] and has a strong reputation for [key achievements/successes]. We are now looking to expand our reach and capabilities, and we believe that a joint venture with your company would be a perfect fit.\n\nOur companies have complementary strengths and expertise, and we believe that by working together we can achieve greater success than we could alone. Specifically, [Your Company Name] has [key strengths/expertise], while [Business Partner's Company Name] has [key strengths/expertise]. By combining our resources and capabilities, we believe we can [describe the benefits of the proposed joint venture].\n\nWe are excited about the prospect of working with you and your team, and we believe that this joint venture has the potential to be a long-term, mutually beneficial partnership.\n\nWe would be happy to discuss the details of the proposed joint venture in further meetings or conversations. Please let us know if you are interested in exploring this opportunity further, and we will be happy to provide you with any additional information you may need.\n\nThank you for your time and consideration. We look forward to hearing from you soon.\n\nBest regards,\n\n[Your Name]\n[Your Company Name]\n[Your Contact Information]", "answer_id": "B6CrSifSJDEHUYiBaiHELR", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 72, "text": ": Of course! Here is a template for a resignation letter that you can use as a starting point:\n\n[Your Name]\n[Your Address]\n[City, State ZIP Code]\n[Date]\n\n[Your Employer's Name]\n[Your Employer's Address]\n[City, State ZIP Code]\n\nDear [Employer's Name],\n\nPlease accept this letter as formal notice of my resignation from [Company Name], effective [date]. I will work through [number of days] to ensure a smooth transition.\n\nI am grateful for the opportunities and experience that I have gained during my time at [Company Name]. I appreciate the support and guidance that I have received from the leadership team and my colleagues, and I am proud of the work that we have accomplished together.\n\nI will do everything I can to ensure a smooth transition and to help with the handover to my successor. Please let me know if there is anything specific that I can do to assist.\n\nThank you again for the opportunities and support provided during my time at [Company Name]. I wish you and the company continued success in the future.\n\nSincerely,\n\n[Your Name]\n\nYou can customize this template to fit your own specific circumstances, including adding or removing any details that you think are relevant. It's important to keep the tone of the letter professional and positive, and to express gratitude for the opportunities you have been given.", "answer_id": "932DTWDKnBbQDQYTW6be87", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 73, "text": ": [Your Name]\n[Your Title]\n[Your Company/Organization]\n[Your Address]\n[City, State ZIP Code]\n[Email Address]\n[Today\u2019s Date]\n\n[Student Name]\n[Student Address]\n[Student City, State ZIP Code]\n\nDear [Student Name],\n\nI am writing this letter of recommendation to strongly encourage you to apply to the prestigious graduate program in computer science at [University Name]. As [your title and/or relationship to the student], I have had the pleasure of observing your academic and professional growth, and I am confident that you possess the skills, talent, and potential to excel in this demanding and highly competitive program.\n\nI have been consistently impressed by your passion for computer science and your ability to excel in challenging academic and professional environments. Your exceptional problem-solving skills, attention to detail, and innovative thinking have consistently set you apart from your peers and demonstrated your commitment to excellence in your field.\n\nIn addition to your academic achievements, I have also had the opportunity to observe your leadership skills and ability to collaborate effectively with others. You have consistently demonstrated the ability to work well in teams, motivate others, and provide constructive feedback to help others improve.\n\nI am confident that you will thrive in the rigorous and challenging graduate program at [University Name]. Your exceptional academic record, passion for computer science, leadership skills, and ability to work well in teams make you an ideal candidate for this program. I am confident that you will excel academically and professionally and make a positive impact in the field of computer science.\n\nPlease accept this letter as formal recommendation for your application to the graduate program in computer science at [University Name]. I am confident that you will be an asset to the program and look forward to hearing about your future successes.\n\nSincerely,\n\n[Your Name]\n[Your Title]\n[Your Company/Organization]\n[Your Address]\n[City, State ZIP Code]\n[Email Address]\n[Today\u2019s Date]", "answer_id": "jVEfncaU9LitqggtYBGuik", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 74, "text": ": Subject: Introducing Our Revolutionary New Software Solution!\n\nDear valued customers,\n\nWe are thrilled to announce the launch of our latest software solution, designed to revolutionize the way you work. Our team has been working tirelessly for months to bring this innovative product to market, and we are confident that it will exceed your expectations.\n\nOur new software solution is packed with features that will streamline your workflows, increase your productivity, and make your life easier. With its intuitive interface and user-friendly design, you'll be up and running in no time.\n\nSome of the key features of our new software solution include:\n\n* Advanced security measures to protect your data\n* Customizable dashboards to suit your needs\n* Automated workflows to save you time and effort\n* Real-time reporting and analytics to help you make informed decisions\n\nWhether you're a small business owner or a large enterprise, our software solution is designed to meet your needs. With our software, you'll be able to work smarter, not harder.\n\nWe are confident that our new software solution will be a game-changer for you and your business. We would love to schedule a demo and show you how it works.\n\nThank you for your loyalty and support. We can't wait to see the amazing things you'll accomplish with our new software solution.\n\nBest regards,\n[Your Name]\n[Your Company]", "answer_id": "NQSftH48RoRxTKLCqNbPtt", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 75, "text": ": Subject: Apology for Delay in Your Order\n\nDear [Customer Name],\n\nI am reaching out to apologize for the inconvenience and frustration you experienced due to the delay in the delivery of your order. I understand how important it is to receive your order in a timely manner, and I want to assure you that we are committed to resolving the issue as quickly as possible.\n\nI have reviewed our records, and I can confirm that there was an unforeseen technical issue that caused a delay in processing your order. I want to assure you that we have taken steps to resolve the issue and prevent similar delays from happening in the future.\n\nI apologize for any inconvenience this issue may have caused you, and I want to assure you that we are doing everything possible to rectify the situation. We value your business and appreciate your patience and understanding during this time.\n\nIf there is anything else we can do to assist you, please do not hesitate to contact us. We are committed to providing you with the best possible service, and we want to make it right for you.\n\nThank you for your understanding and cooperation.\n\nSincerely,\n\n[Your Name]", "answer_id": "YeYDnz2kVYAfsz7vCfgLNS", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 76, "text": "Title: \"Exploring the Fascinating History and Cultural Significance of Jazz\"\n\nOpening shot of a jazz band playing live in front of a packed audience.\n\nVoiceover: \"Jazz. A genre of music that originated in the African American communities of New Orleans in the late 19th and early 20th centuries. It's a style that's known for its improvisation, rhythm, and energy. But jazz is more than just music. It's a cultural phenomenon that has played a significant role in shaping the world we live in today.\"\n\nCut to a historical reenactment of African American musicians playing jazz in the early 1900s.\n\nVoiceover: \"The roots of jazz can be traced back to the African American communities of Louisiana, where enslaved people were exposed to a variety of musical traditions, including African rhythms and European musical techniques. These influences would later combine to create the unique sound of jazz.\"\n\nCut to a shot of a vintage jazz record, spinning on a turntable.\n\nVoiceover: \"In the early 20th century, jazz began to spread beyond the southern United States and would eventually become a global phenomenon. The music was a reflection of the times, capturing the spirit of the Roaring Twenties, the Great Depression, and the struggles of the Civil Rights movement.\"\n\nCut to a shot of a jazz club in the 1950s, with musicians playing live.\n\nVoiceover: \"In the post-World War II era, jazz continued to evolve and diversify, with new subgenres such as bebop, cool jazz, and hard bop emerging. The music became a haven for experimentalism and self-expression, attracting artists and intellectuals from all over the world.\"\n\nCut to a shot of a modern-day jazz festival, with a diverse group of musicians and fans in attendance.\n\nVoiceover: \"Today, jazz is more popular than ever, with festivals, clubs, and concerts happening all over the world. It continues to inspire and influence other genres of music, as well as art, literature, and film.\"\n\nCut to a final shot of a jazz musician playing their instrument, with the words \"Jazz: A Fascinating History and Cultural Significance\" appearing on the screen.\n\nVoiceover: \"Jazz is more than just music. It's a cultural legacy that continues to captivate and inspire us, and will no doubt continue to do so for generations to come.\"\n\nClosing shot of the jazz band playing live in front of the packed audience.", "answer_id": "o2LgSDUwp4b4y4iCteh3RR", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 77, "text": "Aloha from Hawaii! I recently had the incredible opportunity to travel to the beautiful state of Hawaii and experience its rich culture and stunning landscapes. This tropical paradise is a dream destination for anyone looking for sun, sand, and adventure. From hiking through lush rainforests, to learning about traditional Hawaiian culture, there is truly something for everyone in Hawaii.\nOne of the most incredible cultural experiences I had in Hawaii was visiting the Polynesian Cultural Center in Laie. This state-of-the-art attraction allows visitors to explore the rich cultures of Hawaii, Samoa, Tonga, Fiji, and other Pacific Islands. I was amazed by the live demonstrations of traditional dance, music, and crafts, and the interactive exhibits that bring the cultures to life. I even got to try some delicious Pacific Island cuisine, including poi and Hawaiian sushi.\nAnother must-see attraction in Hawaii is the Pearl Harbor Memorial in Honolulu. This historic site is a poignant reminder of the attack on Pearl Harbor, and the sacrifices made by those who served in World War II. Visitors can explore the USS Arizona Memorial, the USS Missouri Battleship, and the Pacific Aviation Museum, which showcases historic aircraft and artifacts.\nFor those looking for adventure, Hawaii has no shortage of activities. From zip-lining through the lush rainforests, to surfing the famous waves at Pipeline, there is something for adrenaline junkies and beach bums alike. I even got to swim with the friendly Hawaiian sea turtles and watch humpback whales breaching in their natural habitat.\nIn conclusion, Hawaii is a truly magical place that offers something for everyone. From its rich culture and history, to its stunning landscapes and adventurous activities, this tropical paradise is a must-visit destination. I can't wait to go back and explore more of the beautiful state of Hawaii! Aloha from Hawaii!", "answer_id": "CBvZQ8RJG6AqhUBSoUDbqU", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 78, "text": "I recently had the pleasure of watching \"The Expanse,\" a science fiction film that follows a group of people as they navigate a mysterious event that hits the world and causes a breakout of violence and chaos.\nThe film boasts an impressive cast, including Chris Evans, Mahershala Ali, and Brendan Gleeson, who all deliver strong performances. The characters are well-developed and interesting, and the interactions between them are believable and engaging.\nThe plot is intricate and fast-paced, with plenty of twists and turns to keep you on the edge of your seat. The special effects are also top-notch, with impressive attention to detail and some truly awe-inspiring visuals.\nOverall, \"The Expanse\" is a thought-provoking and action-packed science fiction film that is sure to entertain and thrill audiences. It is a must-watch for anyone who loves science fiction, and I highly recommend it.", "answer_id": "VfeFmV9H6SEuotYqEf5ri5", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 79, "text": ": [Opening Music]\n\nHost: Welcome to our podcast, where we discuss the latest trends and developments in the music industry. Today, we're going to be talking about the influence of streaming platforms on the music industry.\n\n[Intro Music Starts]\n\nHost: Streaming platforms like Spotify, Apple Music, and Tidal have revolutionized the way we listen to music. They've made it easier than ever to access a vast library of songs, and have given artists new ways to reach their fans. But with these benefits come challenges, and the impact of streaming on the music industry has been a topic of discussion for years. So, let's dive in and examine how streaming platforms have changed the way we listen to music.\n\n[Interview with an Artist]\n\nArtist: Streaming has definitely had a huge impact on the music industry. It's made it easier for artists to get their music out there and for fans to access it. But at the same time, it's also made it harder for artists to make a living. With streaming platforms taking a cut of the revenue, it's become more difficult for artists to earn a sustainable income from their music.\n\n[Interview with a Music Industry Insider]\n\nInsider: From my perspective, streaming platforms have been a game-changer for the music industry. They've opened up new opportunities for artists to reach a wider audience and for fans to discover new music. But, it's also true that the way artists are compensated has changed. In the past, artists made money from the sale of physical albums and concert tickets, but now, the majority of their income comes from streaming royalties.\n\n[Interview with another Artist]\n\nArtist: As an artist, it's important to have your music available on streaming platforms, but it's also important to have a strategy in place for maximizing your earnings. Some artists choose to release their music exclusively on one platform, while others release their music on multiple platforms to reach a wider audience. It's all about finding the right balance and making the most of the opportunities that streaming platforms present.\n\n[Closing Music]\n\nHost: That's all the time we have for today's episode. We hope you enjoyed our discussion on the influence of streaming platforms on the music industry. Remember to follow us on social media for updates on future episodes, and thank you for tuning in.\n\n[Outro Music]", "answer_id": "ZLYhNQxJXycBZL56RbdGh3", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
+{"question_id": 80, "text": ": I'm sorry, as a language model, I am not able to attend concerts or write about them in real-time. However, I can provide a general example of a symphony concert review.\n\nSymphony concerts are a beautiful and moving experience, and the orchestra I attended last night did not disappoint. The performance was top-notch, with each instrument and section playing with precision and emotion.\n\nThe venue was well-maintained and the acoustics were excellent, allowing for the music to wash over the audience and envelop us in sound. The conductor led the orchestra with a sure hand, guiding them through the complex and intricate pieces with ease.\n\nThe audience was a mix of classical music enthusiasts and first-time attendees, and everyone was captivated by the music. The energy in the room was electric, and the applause at the end of each piece was thunderous.\n\nOverall, it was a truly memorable experience and one that I will always treasure. The orchestra's performance and the audience's enthusiasm made for a night that I will never forget.", "answer_id": "kZw2ii8HQtrQp4d2bK5cHy", "model_id": "vicuna-7b:20230322-fp16", "metadata": {}}
diff --git a/model/fastchat/eval/table/model.jsonl b/model/fastchat/eval/table/model.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..f3094d3b79cb6a60f965e04c8f304b9dc0c8d7f1
--- /dev/null
+++ b/model/fastchat/eval/table/model.jsonl
@@ -0,0 +1,7 @@
+{"model_id": "vicuna-13b:20230322-clean-lang", "model_name": "vicuna-13b", "model_version": "20230322-clean-lang", "model_metadata": "vicuna-13b-20230322-clean-lang"}
+{"model_id": "alpaca-13b:v1", "model_name": "alpaca-13b", "model_version": "v1", "model_metadata": "alpaca-13b"}
+{"model_id": "llama-13b:v1", "model_name": "llama-13b", "model_version": "v1", "model_metadata": "hf-llama-13b"}
+{"model_id": "bard:20230327", "model_name": "bard", "model_version": "20230327", "model_metadata": "Google Bard 20230327"}
+{"model_id": "gpt-3.5-turbo:20230327", "model_name": "gpt-3.5-turbo", "model_version": "20230327", "model_metadata": "OpenAI ChatGPT gpt-3.5-turbo Chat Completion"}
+{"model_id": "vicuna-13b:20230322-new-hp-fp16", "model_name": "vicuna-13b", "model_version": "20230322-new-hp-fp16", "model_metadata": "gs://model-weights/vicuna-13b-20230322-new-hp-fp16"}
+{"model_id": "vicuna-7b:20230322-fp16", "model_name": "vicuna-7b", "model_version": "20230322-fp16", "model_metadata": "gs://model-weights/vicuna-7b-20230322-fp16"}
diff --git a/model/fastchat/eval/table/prompt.jsonl b/model/fastchat/eval/table/prompt.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..9ac5d91610dc90afb0c2947769ba641d2509711e
--- /dev/null
+++ b/model/fastchat/eval/table/prompt.jsonl
@@ -0,0 +1,3 @@
+{"prompt_id": 1, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[The Start of Assistant 1's Answer]\n{answer_1}\n\n[The End of Assistant 1's Answer]\n\n[The Start of Assistant 2's Answer]\n{answer_2}\n\n[The End of Assistant 2's Answer]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, "description": "Prompt for general questions", "category": "general"}
+{"prompt_id": 2, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[The Start of Assistant 1's Answer]\n{answer_1}\n\n[The End of Assistant 1's Answer]\n\n[The Start of Assistant 2's Answer]\n{answer_2}\n\n[The End of Assistant 2's Answer]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "Your task is to evaluate the coding abilities of the above two assistants. They have been asked to implement a program to solve a given problem. Please review their code submissions, paying close attention to their problem-solving approach, code structure, readability, and the inclusion of helpful comments.\n\nPlease ensure that the assistants' submissions:\n\n1. Correctly implement the given problem statement.\n2. Contain accurate and efficient code.\n3. Include clear and concise comments that explain the code's logic and functionality.\n4. Adhere to proper coding standards and best practices.\n\nOnce you have carefully reviewed both submissions, provide detailed feedback on their strengths and weaknesses, along with any suggestions for improvement. You should first output a single line containing two scores on the scale of 1-10 (1: no code/no sense; 10: perfect) for Assistant 1 and 2, respectively. Then give extra comments starting from the next line."}, "description": "Prompt for coding questions", "category": "coding"}
+{"prompt_id": 3, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[The Start of Assistant 1's Answer]\n{answer_1}\n\n[The End of Assistant 1's Answer]\n\n[The Start of Assistant 2's Answer]\n{answer_2}\n\n[The End of Assistant 2's Answer]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "We would like to request your feedback on the mathematical proficiency of two AI assistants regarding the given user question displayed above.\nFirst, please solve the problem independently, without referring to the answers provided by Assistant 1 and Assistant 2.\nAfterward, please examine the problem-solving process of Assistant 1 and Assistant 2 step-by-step to ensure their correctness, identifying any incorrect steps if present. Your evaluation should take into account not only the answer but also the problem-solving steps.\nFinally, please output a Python tuple containing two numerical scores for Assistant 1 and Assistant 2, ranging from 1 to 10, respectively. If applicable, explain the reasons for any variations in their scores and determine which assistant performed better."}, "description": "Prompt for math questions", "category": "math"}
diff --git a/model/fastchat/eval/table/question.jsonl b/model/fastchat/eval/table/question.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..c946b8f79deba324a88ab0d61a322942b19fa764
--- /dev/null
+++ b/model/fastchat/eval/table/question.jsonl
@@ -0,0 +1,80 @@
+{"question_id": 1, "text": "How can I improve my time management skills?", "category": "generic"}
+{"question_id": 2, "text": "What are the most effective ways to deal with stress?", "category": "generic"}
+{"question_id": 3, "text": "What are the main differences between Python and JavaScript programming languages?", "category": "generic"}
+{"question_id": 4, "text": "How can I increase my productivity while working from home?", "category": "generic"}
+{"question_id": 5, "text": "Can you explain the basics of quantum computing?", "category": "generic"}
+{"question_id": 6, "text": "What are the differences between plant-based and animal-based protein sources?", "category": "generic"}
+{"question_id": 7, "text": "How can I develop my critical thinking skills?", "category": "generic"}
+{"question_id": 8, "text": "What are the major challenges faced by the education sector today?", "category": "generic"}
+{"question_id": 9, "text": "What are the primary factors that influence consumer behavior?", "category": "generic"}
+{"question_id": 10, "text": "What are the most effective strategies for conflict resolution in the workplace?", "category": "generic"}
+{"question_id": 11, "text": "What are some potential implications of using a single-use plastic bottle versus a reusable bottle on both the environment and human health?", "category": "knowledge"}
+{"question_id": 12, "text": "What factors would you consider when designing an inclusive and accessible public transportation system?", "category": "knowledge"}
+{"question_id": 13, "text": "How can governments utilize fiscal and monetary policies to combat economic recessions?", "category": "knowledge"}
+{"question_id": 14, "text": "How do language and cultural barriers affect the way people communicate and form relationships in multicultural societies?", "category": "knowledge"}
+{"question_id": 15, "text": "Describe a scenario where artificial intelligence could be used to improve the quality and efficiency of healthcare delivery.", "category": "knowledge"}
+{"question_id": 16, "text": "Explain the process of gene editing using CRISPR-Cas9 technology, and discuss its potential applications and ethical implications.", "category": "knowledge"}
+{"question_id": 17, "text": "How do vaccinations work to protect individuals and communities from infectious diseases, and what is herd immunity?", "category": "knowledge"}
+{"question_id": 18, "text": "How do social media platforms influence the way people consume and share news, and what are the potential implications for the spread of misinformation?", "category": "knowledge"}
+{"question_id": 19, "text": "How do cultural, social, and economic factors influence people's food choices, and how can this knowledge be used to promote healthier diets?", "category": "knowledge"}
+{"question_id": 20, "text": "Explain the process of natural selection and how it contributes to the evolution and adaptation of species.", "category": "knowledge"}
+{"question_id": 21, "text": "How would you introduce yourself as a medieval knight at a royal banquet?", "category": "roleplay"}
+{"question_id": 22, "text": "As a pirate captain, what would you say to your crew to motivate them to search for hidden treasure?", "category": "roleplay"}
+{"question_id": 23, "text": "If you were a Shakespearean character, how would you declare your love for someone in a soliloquy?", "category": "roleplay"}
+{"question_id": 24, "text": "As a superhero, how would you explain your origin story to a curious child?", "category": "roleplay"}
+{"question_id": 25, "text": "Imagine you are a time traveler from the year 3000. What technological advancements would you tell people about?", "category": "roleplay"}
+{"question_id": 26, "text": "As a sports commentator, describe the winning play in the final seconds of a championship game.", "category": "roleplay"}
+{"question_id": 27, "text": "Pretend to be a world-famous chef. How would you describe your signature dish to a panel of judges?", "category": "roleplay"}
+{"question_id": 28, "text": "You are a mountain climber reaching the summit of Mount Everest. Describe your emotions and the view from the top.", "category": "roleplay"}
+{"question_id": 29, "text": "As a space colonist on Mars, describe your daily life and the challenges you face living on another planet.", "category": "roleplay"}
+{"question_id": 30, "text": "Pretend to be a character in a post-apocalyptic world. Describe how you survive and the allies you encounter.", "category": "roleplay"}
+{"question_id": 31, "text": "How can you determine if a restaurant is popular among locals or mainly attracts tourists, and why might this information be useful?", "category": "common-sense"}
+{"question_id": 32, "text": "What are some subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed?", "category": "common-sense"}
+{"question_id": 33, "text": "Why might someone choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app?", "category": "common-sense"}
+{"question_id": 34, "text": "How can you determine if a person is genuinely interested in a conversation or simply being polite?", "category": "common-sense"}
+{"question_id": 35, "text": "Why might someone prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher?", "category": "common-sense"}
+{"question_id": 36, "text": "How can you assess the credibility of a source of information, such as a news article or blog post, without relying solely on the reputation of the author or publisher?", "category": "common-sense"}
+{"question_id": 37, "text": "Why do some people enjoy the sensation of being scared, such as by watching horror movies or going on roller coasters, while others avoid these experiences?", "category": "common-sense"}
+{"question_id": 38, "text": "How can observing the behavior of other people in a social situation provide clues about cultural norms and expectations?", "category": "common-sense"}
+{"question_id": 39, "text": "Do we have a moral obligation to explore space, or should we focus on solving Earth's problems first?", "category": "common-sense"}
+{"question_id": 40, "text": "In a world where automation is becoming increasingly prevalent, is it more important to prioritize job creation or technological progress?", "category": "common-sense"}
+{"question_id": 41, "text": "How many times does the average human blink in a lifetime? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 42, "text": "How many atoms are in a grain of salt? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 43, "text": "How many lightning strikes occur on Earth each day? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 44, "text": "How many balloons would it take to lift a house like in the movie \"Up\"? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 45, "text": "How many text messages are sent globally in a minute? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 46, "text": "How many words are spoken daily on Earth? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 47, "text": "How many snowflakes fall during a typical winter? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 48, "text": "How many pages are in all the books ever written? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 49, "text": "How many times has the Earth orbited the Sun since the beginning of life? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 50, "text": "How many songs have been recorded throughout history? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"}
+{"question_id": 51, "text": "What if the Internet had been invented during the Renaissance period?", "category": "counterfactual"}
+{"question_id": 52, "text": "What if the Aztecs had successfully repelled the Spanish conquistadors?", "category": "counterfactual"}
+{"question_id": 53, "text": "What if the Black Death had not occurred in the 14th century?", "category": "counterfactual"}
+{"question_id": 54, "text": "What if Isaac Newton had focused on biology instead of physics?", "category": "counterfactual"}
+{"question_id": 55, "text": "What if the Beatles had never formed as a band?", "category": "counterfactual"}
+{"question_id": 56, "text": "What if Alan Turing had not cracked the Enigma code during World War II?", "category": "counterfactual"}
+{"question_id": 57, "text": "What if the Suez Canal had never been constructed?", "category": "counterfactual"}
+{"question_id": 58, "text": "What if the Maya civilization had never mysteriously collapsed?", "category": "counterfactual"}
+{"question_id": 59, "text": "What if Christopher Columbus had not discovered the Americas?", "category": "counterfactual"}
+{"question_id": 60, "text": "What if Vincent van Gogh had been a successful artist during his lifetime?", "category": "counterfactual"}
+{"question_id": 61, "text": "Develop a C++ program that reads a text file line by line and counts the number of occurrences of a specific word in the file.", "category": "coding"}
+{"question_id": 62, "text": "Implement a Python function to find the longest common subsequence of two input strings using dynamic programming.", "category": "coding"}
+{"question_id": 63, "text": "Implement a regular expression in Python to validate an email address.", "category": "coding"}
+{"question_id": 64, "text": "Write a program to find the nth Fibonacci number using dynamic programming.", "category": "coding"}
+{"question_id": 65, "text": "Implement a binary search algorithm to find a specific element in a sorted array.", "category": "coding"}
+{"question_id": 66, "text": "Implement a queue data structure using two stacks in Python.", "category": "coding"}
+{"question_id": 67, "text": "Implement a program to find the common elements in two arrays without using any extra data structures.", "category": "coding"}
+{"question_id": 68, "text": "Given that f(x) = 5x^3 - 2x + 3, find the value of f(2).", "category": "math"}
+{"question_id": 69, "text": "Solve for x in the equation 3x + 10 = 5(x - 2).", "category": "math"}
+{"question_id": 70, "text": "If the endpoints of a line segment are (2, -2) and (10, 4), what is the length of the segment?", "category": "math"}
+{"question_id": 71, "text": "Can you help me write a formal email to a potential business partner proposing a joint venture?", "category": "writing"}
+{"question_id": 72, "text": "Can you help me write a resignation letter to my current employer, while leaving on good terms and expressing gratitude for the opportunities provided?", "category": "writing"}
+{"question_id": 73, "text": "Use an appropriate format to structure a formal letter of recommendation for a student applying to a prestigious graduate program in computer science.", "category": "writing"}
+{"question_id": 74, "text": "Write a compelling product launch announcement email to inform our customers of our new software solution.", "category": "writing"}
+{"question_id": 75, "text": "Draft an apology email to a customer who experienced a delay in their order, and provide reassurance that the issue has been resolved.", "category": "writing"}
+{"question_id": 76, "text": "Write a script for a YouTube video exploring the history and cultural significance of jazz.", "category": "writing"}
+{"question_id": 77, "text": "Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions.", "category": "writing"}
+{"question_id": 78, "text": "Write a captivating movie review for a recently released science fiction film, discussing its plot, characters, and special effects.", "category": "writing"}
+{"question_id": 79, "text": "Structure a podcast script for an episode discussing the influence of streaming platforms on the music industry.", "category": "writing"}
+{"question_id": 80, "text": "Write a symphony concert review, discussing the orchestra's performance and overall audience experience.", "category": "writing"}
diff --git a/model/fastchat/eval/table/review/others/review_llama_alpaca-13b.jsonl b/model/fastchat/eval/table/review/others/review_llama_alpaca-13b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..8b380632d796632a0e0f544e38142e8922ad5ee0
--- /dev/null
+++ b/model/fastchat/eval/table/review/others/review_llama_alpaca-13b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "NcDFhfEgxMKfTTWsbKcnZD", "question_id": 1, "answer1_id": "J3UA6eGXGyFeUGqGpP3g34", "answer2_id": "kEL9ifUHDeYuAXzevje2se", "reviewer_id": 1, "metadata": {}, "text": "9 8\nAssistant 1 provided a more detailed and structured answer, listing 16 specific strategies to improve time management skills. The answer covers various aspects of time management, including setting priorities, creating schedules, setting goals, using technology, and taking care of personal well-being. Assistant 2's answer is also helpful and relevant but is less detailed and structured compared to Assistant 1's response. Both answers are accurate and relevant, but Assistant 1's answer provides a more comprehensive approach to improving time management skills.", "score": [9.0, 8.0]}
+{"review_id": "5em4DusFoarDu5B5C6FgUL", "question_id": 2, "answer1_id": "jZVDNTtFnNNd6vgkGjzAGa", "answer2_id": "VcF3NrWGXhhxLkDVurNrwq", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and comprehensive answer, listing 10 effective ways to deal with stress, along with explanations for each method. The answer covered various aspects of stress management, such as relaxation, time management, social support, and professional help. Assistant 2's answer was more concise and touched on some similar points, but it lacked the depth and specific examples provided by Assistant 1. Both answers were relevant and accurate, but Assistant 1's response was more helpful due to its level of detail.", "score": [9.0, 7.0]}
+{"review_id": "hg74Tqxtvfo3DTrkD2qaWR", "question_id": 3, "answer1_id": "NbSxRXf8Yuvjr7HC6AvrPw", "answer2_id": "LpvtyQi9QdSgRrgGDxiGrT", "reviewer_id": 1, "metadata": {}, "text": "3 8\nAssistant 1's answer is repetitive, disorganized, and contains several inaccuracies. Many points are repeated multiple times with slight variations, and some points contradict each other. On the other hand, Assistant 2's answer is concise, relevant, and accurate, providing a clear comparison between Python and JavaScript. Assistant 2's answer could have included a few more details, but overall, it is much more helpful and informative than Assistant 1's answer.", "score": [3.0, 8.0]}
+{"review_id": "GZLApZFfb3C5rKAEbGS4HK", "question_id": 4, "answer1_id": "E9zrPDdncYvAebEaxiPXfx", "answer2_id": "7zQm8cSTJhPtPdZdxbcfrX", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about increasing productivity while working from home. Assistant 1's answer was well-organized and covered important points such as setting up a proper workspace, blocking out distractions, taking breaks, avoiding multitasking, having an organized schedule, and setting goals. Assistant 2's answer was also well-organized and covered similar points, but it included additional tips such as exercising, connecting with colleagues, tracking progress, and unplugging from work. This made Assistant 2's answer slightly more comprehensive and detailed, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "H75CvppX2NcKYy5xzvHDQw", "question_id": 5, "answer1_id": "36TYpADDHAfRvtvKitfSXR", "answer2_id": "UrLEH82RHwqqLt2LyvYSKj", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about the basics of quantum computing. Assistant 1 briefly explained the concept of qubits and mentioned the advantage of quantum computers in factoring large numbers more efficiently. Assistant 2, on the other hand, provided a more detailed response, explaining the difference between classical and quantum computers, the ability of quantum computers to process information in multiple states, and the potential applications of quantum computing. While both answers were informative, Assistant 2's answer was slightly more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "hK9RDXzaQGGjSH7a3u6oTf", "question_id": 6, "answer1_id": "8vtLLYRM352mZ8PXXbwzBT", "answer2_id": "fpRdMTdnfirosQixuf2Gez", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the completeness of the protein sources, which is an important aspect, but their answer lacked some details about the nutritional differences and environmental impact. Assistant 2, on the other hand, provided a more comprehensive answer, discussing not only the protein sources but also the differences in fat, calories, cholesterol, and the environmental impact of production. This is why Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "coKrqCAZxL2rvaPxcBk5gf", "question_id": 7, "answer1_id": "mrCniyWYvAtxWhgMdXq2BG", "answer2_id": "PvGmsCJSNFcvQKmPTnnd7s", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and structured answer, listing four specific steps to develop critical thinking skills, such as analyzing and interpreting information, assessing the validity of information, using intuition, and learning from mistakes. Assistant 2's answer was more concise and less detailed, mentioning questioning assumptions, taking multiple perspectives, analyzing information, and engaging in thoughtful discussions. Both answers are relevant and accurate, but Assistant 1's response offers a clearer and more actionable guide for developing critical thinking skills.", "score": [8.0, 6.0]}
+{"review_id": "XW9EQ9YGaBJQHYX35999D4", "question_id": 8, "answer1_id": "S8fmdbvpvbgTUyDU9V3M3N", "answer2_id": "n4ANAbpR3gvLPP8poPfKZ6", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is relevant and accurate, but it mainly focuses on the lack of skilled teachers as a major challenge in the education sector. Although this is an important issue, the answer could have been more comprehensive by addressing other challenges as well. Therefore, I give Assistant 1 a score of 7.\n\nAssistant 2's answer provides a more comprehensive list of challenges faced by the education sector, including access to quality education, low educational outcomes, high costs, infrastructure, overcrowding, gender inequality, technology, outdated curriculum, and teacher training. This answer covers a wider range of issues and provides a more detailed response to the question, which is why I give Assistant 2 a score of 9.", "score": [7.0, 9.0]}
+{"review_id": "6rW7iGNnBw4qYzM8XfCEnr", "question_id": 9, "answer1_id": "KmuNjvNKRyaFwaBZTLArcG", "answer2_id": "STJ36GrgQMcaUi7zaoNPit", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and comprehensive answer, covering a wider range of factors that influence consumer behavior, such as psychological, social, cultural, demographic, economic, personal, situational, and market factors. Assistant 2's answer was also relevant and accurate but lacked the level of detail and the variety of factors mentioned by Assistant 1. Therefore, Assistant 1 receives a 9, and Assistant 2 receives a 7.", "score": [9.0, 7.0]}
+{"review_id": "mERdta5q96ZawW5vReAAtr", "question_id": 10, "answer1_id": "HNNT9k5htiMeRwNSD6dcz4", "answer2_id": "425SwYvqKPAXFGTYKXB7Cs", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provides a list of conflict resolution strategies but lacks the focus on the most effective ones and does not provide enough detail on how to implement them. Assistant 2's answer, on the other hand, is more focused on the most effective strategies and provides a clearer explanation of each strategy, making it more helpful and relevant to the user's question. Therefore, Assistant 2 receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "8eE32WwTctnGiV4vr9tMjo", "question_id": 11, "answer1_id": "ChXjhDDikxU9FV3CADs6Ym", "answer2_id": "VbNAuj6KAkMdLJQXMo22oK", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the environmental impact and the health risks associated with single-use plastic bottles, while Assistant 2 expanded on these points and also compared the benefits of using reusable bottles. Assistant 2's answer was more detailed and provided a clearer comparison between single-use and reusable bottles, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "DRc3ZzPo2xVtCyRXUV2etr", "question_id": 12, "answer1_id": "5wsPnN3VmmSkahgugFNo7u", "answer2_id": "CNGqAeu2QJbQ4QGzHJDPdq", "reviewer_id": 1, "metadata": {}, "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1's answer was more detailed and specifically addressed the needs of people with various disabilities, such as sensory, physical, developmental, and mental disabilities. It also mentioned the importance of accessible information for riders. Assistant 2's answer was also relevant and helpful, but it was less detailed and did not specifically address the needs of people with disabilities. However, Assistant 2 did mention the integration of public transportation systems with other services, which is an important aspect of accessibility. Overall, both answers were useful, but Assistant 1's answer was more comprehensive and detailed.", "score": [8.0, 7.0]}
+{"review_id": "5nEZ2B5g7AGsZfBmGXj8AY", "question_id": 13, "answer1_id": "NRGZGnU2sPN3ShMe9C3fMn", "answer2_id": "E8w2qYqnm8iqCrSkUv62sz", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and accurate answer, explaining the two types of fiscal policy and giving examples of how they can be used to combat economic recessions. However, the answer incorrectly included monetary policy examples (lowering and raising interest rates) under fiscal policies. Assistant 2's answer was more concise and touched on the main points of fiscal and monetary policies, but lacked the level of detail and examples provided by Assistant 1. Both answers could have been improved by clearly differentiating between fiscal and monetary policies and providing accurate examples for each.", "score": [8.0, 6.0]}
+{"review_id": "Pxxz59jygEFZdpvprcjEmu", "question_id": 14, "answer1_id": "inKimHkWsXShQBTRmxr5Yg", "answer2_id": "8o5yMymfzo6kzmp9GK5MWr", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was concise and touched on the main points of how language and cultural barriers can affect communication and relationships in multicultural societies. Assistant 2's answer was more detailed, providing additional information on the consequences of these barriers and offering suggestions on how to overcome them. This made Assistant 2's response slightly more helpful and informative, thus earning a higher score.", "score": [8.0, 9.0]}
+{"review_id": "ZSAEaKazwxnXfqbvggibuj", "question_id": 15, "answer1_id": "H8aKtWwf8m6Lgxc2YyR2yf", "answer2_id": "kbJVEEsdsSScEq5Y5furr7", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 listed various applications of AI in healthcare, such as diagnosis, tracking, treatment, research, and education. However, Assistant 2's answer was more detailed, providing specific examples of how AI can be used in healthcare, such as analyzing laboratory results, automating administrative tasks, and facilitating communication between doctors and patients. Assistant 2's answer also touched upon the use of AI in virtual health coaching, which adds an extra dimension to the response. Therefore, Assistant 2 receives a slightly higher score due to the greater level of detail and comprehensiveness in the answer.", "score": [8.0, 9.0]}
+{"review_id": "j6NfurSEAmKCzNtgEjVC8o", "question_id": 16, "answer1_id": "PafVwxMsjSkYUETiVBgxTU", "answer2_id": "CMUL5ULZuR7YC5EPzCBN2N", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a more detailed explanation of the CRISPR-Cas9 mechanism, while Assistant 2 briefly explained the mechanism but provided more examples of potential applications. Assistant 2's answer was slightly more concise and focused on the potential applications and ethical implications, which made it more relevant to the question. Both answers were accurate and informative, but Assistant 2's response was more aligned with the question's focus, hence the slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "8Xs9nkJ73N5EtSd4F8TAqS", "question_id": 17, "answer1_id": "dmDUAfTP4aERJqqSeDBybu", "answer2_id": "kEmDDQyNqSkyFihYEEBpuR", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more detailed explanation of how vaccinations work by mentioning the use of dead or weakened bacteria or viruses to stimulate the immune system. Both assistants explained the concept of herd immunity well, but Assistant 1's answer was slightly more comprehensive and clearer in explaining the relationship between individual immunity and herd immunity. Assistant 2's answer was still relevant and accurate but lacked the level of detail provided by Assistant 1.", "score": [8.0, 7.0]}
+{"review_id": "5vWP9yYBDo7QEtb3FrDadY", "question_id": 18, "answer1_id": "8KGSSqbLqVdSZMEN9oCv5R", "answer2_id": "Qs3grQsqFVGK9EVkCkf9PB", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was more detailed in terms of the specific ways social media platforms influence news consumption and sharing, as well as the potential implications for the spread of misinformation. However, Assistant 2's answer was more concise and focused on the overall impact of social media platforms on news consumption and the spread of misinformation, while also mentioning the importance of user awareness and platform responsibility. Assistant 2's answer was slightly more helpful in providing a broader perspective on the issue, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "XkMtvm2BgTSndjNAj5mMLS", "question_id": 19, "answer1_id": "HbnJXJpPfaM2iX3ek4Epvy", "answer2_id": "kzZ6dKN7hkRWjqtdHr7Qns", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's answer started off well by discussing the influence of cultural, social, and economic factors on food choices. However, the answer then became repetitive and confusing, listing multiple interventions without clear explanations or organization. Assistant 2's answer, on the other hand, provided a concise and relevant response that addressed the question directly and explained the influence of cultural, social, and economic factors on food choices. Assistant 2's answer also touched on how this knowledge can be used to promote healthier diets, making it a more helpful and accurate response.", "score": [4.0, 8.0]}
+{"review_id": "HtVNXs7DafZHs8RLchmPdE", "question_id": 20, "answer1_id": "mx8Abfz5PtDcn6jgCA8zhM", "answer2_id": "DPPDG6YGFJij2GCmRL66PU", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer started off well by explaining the concept of natural selection, but then it veered off into discussing population size and genetic drift, which are related but not directly answering the question. Assistant 2's answer was more concise and directly addressed the question, explaining the process of natural selection and its contribution to the evolution and adaptation of species. Assistant 2's answer was more helpful, relevant, and accurate, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "SnckoaLy5PnN8NAYEBzxDg", "question_id": 21, "answer1_id": "NuS9PUGkJG2pHscArvfyeF", "answer2_id": "D62FjDb4nZANzPpfSfsiyn", "reviewer_id": 1, "metadata": {}, "text": "7 8\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. However, Assistant 2's response was slightly more detailed and included additional information about the knight's skills in both war and peace, as well as their pride in being at the royal court. This extra information makes Assistant 2's answer more helpful and engaging, earning it a higher score.", "score": [7.0, 8.0]}
+{"review_id": "8jvZeKMWMA4thauNzJPzDp", "question_id": 22, "answer1_id": "SPjzirzbzo3UJ8BHXSgY9y", "answer2_id": "k7E4NNw5kyj9DmvP5Pu2zb", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a relevant and helpful answer, but it lacked the excitement and enthusiasm that one might expect from a pirate captain. The answer was accurate and gave some motivation for the crew to search for the treasure. Assistant 2, on the other hand, provided a more engaging and exciting response, using pirate language and creating a sense of adventure. The answer was also relevant and motivating for the crew. Both assistants provided helpful answers, but Assistant 2's response was more in line with the pirate theme and had a stronger motivational tone.", "score": [7.0, 8.0]}
+{"review_id": "jLGZPqzeptUvcpQfYsvGx9", "question_id": 23, "answer1_id": "JkeeC4MZVwybPU8DSbusnT", "answer2_id": "KFocjVCejYrU3YmLjAqoUF", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief explanation of what a soliloquy is and gave an example from Romeo and Juliet, but did not actually answer the question by providing a soliloquy of their own. Assistant 2, on the other hand, directly answered the question by describing how they would declare their love in a soliloquy, using passionate words and metaphors. Assistant 2's response was more relevant and helpful to the user's question, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "LgYzqGXGywN6mQEHFbMFEL", "question_id": 24, "answer1_id": "UcfKSXuzBPkZzLHsH4EwQz", "answer2_id": "dq8Sm9djS7e7y9sG9vmMJf", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's response was not helpful or relevant to the question, as it focused on the origin story of a chatbot rather than a superhero. The answer provided no information about a superhero's origin story, which was the main focus of the question. Assistant 2, on the other hand, provided a relevant and engaging answer that explained a superhero's origin story in a way that a curious child could understand. The response was concise, yet detailed enough to give a clear picture of the superhero's journey. Therefore, Assistant 2's answer is more helpful, relevant, and accurate in addressing the user's question.", "score": [4.0, 8.0]}
+{"review_id": "DCSTnmeSzvYi2TSpPPdTNf", "question_id": 25, "answer1_id": "cEGdo69FwL2Y3teYkMk9Gp", "answer2_id": "XZ8fG8e6u7CyKd2moK6abe", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more detailed and structured answer, listing five specific technological advancements and explaining their applications. Assistant 2's answer was more general and less informative, although it still mentioned some relevant advancements. Both answers were relevant and accurate, but Assistant 1's response offered a higher level of detail, which is why it receives a higher score.", "score": [8.0, 7.0]}
+{"review_id": "hUupuNNrtoThv3KM5fEJ6i", "question_id": 26, "answer1_id": "W28UtVDZTcWwa8TmeruRn4", "answer2_id": "oKaXHfoK4pXwrefFWXmeA8", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's answer started off well by describing the play in detail, but it became confusing and hard to follow as it continued. The answer also did not clearly indicate which team won the game. Assistant 2's answer was more concise and to the point, clearly describing the winning play and the outcome of the game. While it could have provided more detail, it was more relevant and accurate in answering the question.", "score": [6.0, 8.0]}
+{"review_id": "G8rWCmQE4YW7FubMwKvQe7", "question_id": 27, "answer1_id": "j65UyAfRdAcWBAEinXJzBK", "answer2_id": "ZwiZfvDWm7SETKNBfDk7Mb", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided relevant and creative answers to the question. Assistant 1 described a specific dish, chicken fried rice, and mentioned the unique flavors and textures. However, the answer could have been more detailed in terms of the ingredients and preparation process. Assistant 2, on the other hand, provided a more vivid and engaging description of their signature dish, emphasizing the blend of traditional and modern cooking techniques and the balance of flavors and textures. Although Assistant 2 did not mention a specific dish, their answer was more captivating and showcased a personal style, which is why they received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "a9AtNQAvHimziJNZQHsSfP", "question_id": 28, "answer1_id": "VWuywPvFPK42ZxmHpmQ27y", "answer2_id": "DxYopRe2LcTJMy3FWu6btd", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provided a more personal perspective on the emotions and the view from the summit of Mount Everest, but some of the descriptions, such as seeing green trees and bushes, were inaccurate. Assistant 2's answer was more concise and accurate, focusing on the sense of accomplishment and the breathtaking view of the Himalayan mountain range. Both answers were relevant and helpful, but Assistant 2's answer was more accurate and provided a better overall response.", "score": [7.0, 9.0]}
+{"review_id": "GJKByjkZzEnuB3HAUWZfBZ", "question_id": 29, "answer1_id": "AUWSLTAvGV5sP9qLsVyzjT", "answer2_id": "WC3UJVh4jQ5RUkpcRMU98L", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and comprehensive answer, discussing various aspects of life on Mars such as temperature, atmosphere, gravity, food, water, isolation, and daily activities. Assistant 2's answer was shorter and less detailed, but still touched on some key challenges such as temperature fluctuations, limited resources, and physical and mental health. Both answers were relevant and accurate, but Assistant 1's response was more helpful due to the greater level of detail provided.", "score": [8.0, 6.0]}
+{"review_id": "c359sxbc6xyXy9xz7gkcVu", "question_id": 30, "answer1_id": "GZtEdKLxmUUbenH4XRyLkE", "answer2_id": "gTvgn6ksDjGGgdprw6AG5A", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's answer was not helpful as it did not provide a description of the character's survival or the allies encountered in a post-apocalyptic world. Instead, it repeated the same instructions multiple times without providing any actual content. Assistant 2's answer, on the other hand, was relevant, accurate, and detailed. It described the character's survival tactics, the formation of an alliance, and the sharing of knowledge and skills among the allies. This response was more engaging and provided a better understanding of the character's experience in a post-apocalyptic world.", "score": [4.0, 8.0]}
+{"review_id": "Dp4oHwZ3aWsbqUxaqie46Q", "question_id": 31, "answer1_id": "kba2Xpb75rUfnskZSzrBas", "answer2_id": "3q7giCk2BA3Ye4Tm9HC2iw", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's answer was less detailed and had some inaccuracies, such as stating that a restaurant popular among locals would be less crowded, which is not necessarily true. Assistant 2's answer provided more accurate information and offered multiple ways to determine if a restaurant is popular among locals or tourists. Additionally, Assistant 2 explained why this information might be useful, making their response more helpful and relevant.", "score": [6.0, 8.0]}
+{"review_id": "4Yv2TPFPFcm44DdZFCN3nW", "question_id": 32, "answer1_id": "RCaptsMBYXseVJgjJyyvUh", "answer2_id": "hRGsxy86v26SC4yAQS29X4", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more detailed and structured list of subtle clues, making it easier to understand and follow. Assistant 2's answer was also relevant and accurate, but it was less detailed and organized compared to Assistant 1's response. Both answers were helpful and relevant, but Assistant 1's answer was more comprehensive and precise.", "score": [8.0, 7.0]}
+{"review_id": "TuuKTPtEjGxwtGKpxQYeQz", "question_id": 33, "answer1_id": "2CeidEr7Cjj6YgcuaiPgnN", "answer2_id": "3n49A5ggJERfXYrLns3ZeU", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the differences between using a paper map or asking for directions and using a GPS device or smartphone app, highlighting the benefits of seeing the big picture and getting a general idea of the route. Assistant 2, on the other hand, mentioned the tactile and visual experience of using a paper map, the personalized experience of asking locals for directions, and the potential limitations of GPS devices and smartphone apps. Assistant 2's answer was slightly more comprehensive and touched on more reasons why someone might choose a paper map or ask for directions, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "aqwDx9WPfhZ5m5SdkKkPcg", "question_id": 34, "answer1_id": "kpStLLNWNkGHyRFdKsmDds", "answer2_id": "ErCpFtPuYVru4oTTk4WrxG", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provided a basic idea of how to determine if a person is genuinely interested in a conversation or simply being polite, but it lacked details and focused only on one aspect, which was the person's actions. Assistant 2's answer, on the other hand, was more comprehensive and provided more information on how to determine genuine interest, including body language, facial expressions, eye contact, and engagement in the conversation. Assistant 2's answer was more helpful, relevant, and detailed, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "RMPFfYVkeccgpTUSef6wcM", "question_id": 35, "answer1_id": "PFtMQp4X4MrMYkEa8c8CKG", "answer2_id": "PTNoCRMZWoJk8HaKX7fW45", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the support for the local economy, flexibility, and eco-friendliness of small businesses. Assistant 2 emphasized the benefits to the local community, personal experience, higher quality products, and the preservation of local culture. Assistant 2's answer was slightly more detailed and covered a broader range of reasons, which is why it received a higher score. However, both answers were informative and addressed the main points of why someone might prefer shopping at a small, locally-owned business.", "score": [8.0, 9.0]}
+{"review_id": "9GgVatheNnjbDvaeQMqLyy", "question_id": 36, "answer1_id": "4JwjhR5QVsdYgQupSZrfUp", "answer2_id": "n8cFs9KENNwZ4z3SR4iXTr", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1's answer was slightly more detailed and organized, providing a clearer step-by-step approach to assessing the credibility of a source. Assistant 2's answer also provided useful information, but the organization and flow of the response were not as clear as Assistant 1's. Both assistants mentioned checking the author's credentials, the reliability of the source, and the evidence presented in the article. However, Assistant 1 emphasized the importance of checking the source's sources, while Assistant 2 mentioned reading other people's reviews or comments, which is also a valuable point. Overall, both responses were helpful, but Assistant 1's answer was slightly more detailed and well-structured.", "score": [8.0, 7.5]}
+{"review_id": "RdeGetn65y7amGq4GmDjWQ", "question_id": 37, "answer1_id": "ednPMy4dvW9CgKg2PJsBqW", "answer2_id": "GzxL9mmEK5RzKqRbqBMUVC", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1's answer was concise and touched on the main reasons why people enjoy or avoid scary experiences. However, Assistant 2's answer provided a more detailed explanation, mentioning the enhancement of emotional state and the sense of thrill and adventure. Assistant 2 also acknowledged that everyone is different and that preferences may vary, which added more depth to the response. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "St8Sp7VGDSSf9jDpw5LLJ5", "question_id": 38, "answer1_id": "ehPApSqCniyGN7hd332ToW", "answer2_id": "QpoHFgb9SzwuaXQQUuBUQD", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provides a basic example of observing behavior in a new country, but it lacks depth and detail. On the other hand, Assistant 2's answer is more comprehensive, discussing various aspects of social interaction, such as addressing each other, handling disagreements, problem-solving, body language, and nonverbal cues. Assistant 2's answer is more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "7J6qhQNfT7MogHgLiZGJBz", "question_id": 39, "answer1_id": "bLvdcqSwgSCrUPZ8eQLLqb", "answer2_id": "Fxe6MS4GpP3LMDUwzY2cPA", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1's answer provided some benefits of space exploration but did not address the moral obligation aspect of the question. Assistant 2's answer, on the other hand, acknowledged both sides of the argument and emphasized the individual's role in deciding which cause is more important. Assistant 2's response was more balanced and relevant to the question, which is why it received a higher score.", "score": [7.0, 8.0]}
+{"review_id": "evNbEXzt7eVaN9DH66GjXW", "question_id": 40, "answer1_id": "8tx5GEHjSRKZRaZwHvLoM8", "answer2_id": "mJiQ2FGR4Xb8kmhZjharkw", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1's answer was more detailed, discussing various factors that influence the balance between job creation and technological progress. However, Assistant 2's answer was more concise and focused on the importance of striking a balance between the two, while also suggesting specific policies and initiatives to achieve this balance. Both answers were accurate and relevant, but Assistant 2's response was slightly more actionable and solution-oriented, which is why it receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "jMsV935GC3NXqiN4NdZkv7", "question_id": 41, "answer1_id": "J6TzFQAyiHs6Arijqwxtrm", "answer2_id": "6Kph4RHRKEZ4YUoaHuEhBv", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a detailed answer with calculations based on the average life expectancy in the United States and two different estimates of daily blink rates. However, the daily blink rates mentioned (20,000 to 30,000) are significantly higher than the generally accepted average of around 14,400 blinks per day. This led to an overestimation of the total blinks in a lifetime.\n\nAssistant 2 provided a more accurate answer, stating that the average human blinks about 14,400 times per day, which results in approximately 50 million blinks in a lifetime. The answer also included the purpose and benefits of blinking, which added value to the response. However, Assistant 2 could have provided a more detailed calculation to support the 50 million blinks estimate. Overall, Assistant 2's answer was more accurate and relevant, but both answers could be improved.", "score": [7.0, 8.0]}
+{"review_id": "cjAvZrPLYYDHmvGamgmKJr", "question_id": 42, "answer1_id": "f7xAtd3wwc4vP7WSnHffAN", "answer2_id": "WBwpBQwhxn5kxLDb7MschC", "reviewer_id": 1, "metadata": {}, "text": "7 3\nAssistant 1's answer is more helpful and relevant than Assistant 2's answer. Assistant 1 provides a range of atoms in a grain of salt depending on its size, which is more reasonable and informative. However, it could have been more precise by explaining how the calculation was made. Assistant 2's answer is less accurate and less relevant, as it provides an incorrect number of atoms and uses an incorrect unit (102.98 atoms instead of 10^22 or 10^23 atoms). The explanation of atomic weight is not directly relevant to the question, and the final statement about 60.98 grams is confusing and incorrect.", "score": [7.0, 3.0]}
+{"review_id": "CLwNJs4HaNFFhLPgENyaVh", "question_id": 43, "answer1_id": "eRMyp4AbfgBqtbVZzHupFN", "answer2_id": "kf8nahQVci2ZLaYikagB7U", "reviewer_id": 1, "metadata": {}, "text": "7 4\nAssistant 1 provided a more accurate and detailed answer, stating that there are about 18 million lightning strikes per year, or roughly 50 million strikes per day. This answer is supported by a reference to the National Weather Service. However, the answer could have been more helpful by explaining the reasoning behind these numbers and the factors that contribute to lightning strikes. Assistant 2's answer is less accurate, stating that there are approximately 2000 lightning strikes per day, which is significantly lower than the number provided by Assistant 1. While Assistant 2 attempted to explain the reasoning behind lightning strikes, the answer lacked accuracy and detail, and did not provide any reference to support the claim.", "score": [7.0, 4.0]}
+{"review_id": "dbZzbW4NKEAiBZR6gcyohX", "question_id": 44, "answer1_id": "kjKb6YG86FfAh22qCePV4V", "answer2_id": "Gptgryd4o2dC8V5aqRmeJJ", "reviewer_id": 1, "metadata": {}, "text": "6 4\nAssistant 1's answer is more helpful and relevant than Assistant 2's answer, but it still contains some inaccuracies and lacks clarity. Assistant 1 attempted to provide a step-by-step explanation, but the calculations were incorrect, and the final conclusion contradicted the initial statement. Assistant 2's answer provided a clear number of balloons needed, but the explanation was confusing and lacked a proper step-by-step reasoning. Both answers could be improved by providing accurate calculations and a clearer explanation of the reasoning behind their conclusions.", "score": [6.0, 4.0]}
+{"review_id": "Z9YdTajgUBruMGZkigcSVV", "question_id": 45, "answer1_id": "ULtkGu6m6tB4tawFStVdJM", "answer2_id": "RfBWW8ZhdfTuTMb454Un4o", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a clear answer with statistics from 2016, mentioning the number of text messages sent in the US and worldwide. However, the answer did not directly address the question of how many text messages are sent globally in a minute. Assistant 2 provided a more direct answer to the question, stating that over 6.3 billion text messages are sent globally in a minute, according to a 2017 study. Assistant 2 also discussed the expected growth in the number of text messages sent and the factors contributing to this increase. While both answers provided relevant information, Assistant 2's response was more directly related to the question and provided more recent data, which is why it received a higher score.", "score": [7.0, 8.0]}
+{"review_id": "fxdDpH7RJFp8jDpJD7gisS", "question_id": 46, "answer1_id": "C6hkCnJZmfCGvnr5an2jFh", "answer2_id": "neGgLYm47JvqN8qkw8VeoW", "reviewer_id": 1, "metadata": {}, "text": "4 2\nAssistant 1's answer attempted to provide a step-by-step reasoning for the estimation of words spoken daily on Earth, but the calculations and assumptions were incorrect and confusing. The answer focused on the English language and its number of words, which is not relevant to the question. Assistant 2's answer provided a vague estimation of over 1 billion words spoken daily, without any reasoning or calculations. Both answers lacked accuracy and relevant details, but Assistant 1's answer was slightly more helpful due to the attempt to provide a step-by-step explanation, even though it was flawed.", "score": [4.0, 2.0]}
+{"review_id": "QSb7VorJz3C8WRo4npfuss", "question_id": 47, "answer1_id": "DApBTLuiJwSsfQHTioH9PY", "answer2_id": "KSePbzLwsYasR3aui4HU8h", "reviewer_id": 1, "metadata": {}, "text": "4 6\nAssistant 1's answer is not helpful, as it provides an incorrect and unrealistic number of snowflakes falling during a typical winter. The answer also focuses too much on the size of snowflakes rather than the quantity. Assistant 2's answer is slightly more helpful, as it provides an average number of snowflakes falling per second, but it still does not give a clear estimate for the entire winter. Both answers could have been improved by considering factors such as geographical location, duration of winter, and average snowfall to provide a more accurate and comprehensive response.", "score": [4.0, 6.0]}
+{"review_id": "9sN9icePgykK3iM3KqKh2q", "question_id": 48, "answer1_id": "gmy58biNVuneSpFZTcpXtT", "answer2_id": "SWWfLpP2gnWHLbYRZXNsW3", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer starts by mentioning the number of published books but does not provide a clear estimate of the total number of pages in all books ever written. The answer also does not explain the reasoning or calculations behind the estimate, making it less helpful and detailed.\n\nAssistant 2's answer, on the other hand, provides a clear estimate of 100 billion pages and explains the reasoning behind this number. The answer takes into account the number of unique titles and the average number of pages per book. It also provides additional context about the significance of books throughout history and the shift towards digital formats. This answer is more helpful, relevant, accurate, and detailed, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "QHGZTbEfXrQNkcXkm5J6Vt", "question_id": 49, "answer1_id": "Cpi6LBB86W73APEA8naNuj", "answer2_id": "WaBwKYhs7eAG22qCGLH2j3", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's answer started with the correct number of orbits (4.7 billion), but the explanation and calculations that followed were confusing and unnecessary, as they focused on the distance traveled rather than the time elapsed. Assistant 2's answer was more relevant and accurate, providing a clearer explanation of the calculation (4.5 billion years multiplied by 365.25 days per year) and the importance of the Earth's orbit for sustaining life. However, Assistant 2's answer used an estimate of 4.5 billion years instead of the more accurate 4.7 billion years. Overall, Assistant 2's answer was more helpful and relevant, but both answers could be improved.", "score": [6.0, 8.0]}
+{"review_id": "HRyhSPS3mYUJeD9HCzAS2g", "question_id": 50, "answer1_id": "3g83CoAFWrDD6V6LyEpoMV", "answer2_id": "MfMJeE9om7qyBbqopHouf4", "reviewer_id": 1, "metadata": {}, "text": "4 7\nAssistant 1's answer is based on incorrect assumptions and calculations, resulting in an inaccurate and misleading answer. The number of songs recorded per day and the total number of songs recorded in the 19th and 20th centuries are not accurate, leading to a very low estimate of 36,000 songs. Assistant 2's answer, while not providing an exact number, acknowledges the difficulty in quantifying the total number of songs recorded throughout history and provides a more reasonable estimate of several hundred thousand. Assistant 2 also offers a brief overview of the impact of recording technology and the rise of digital music, making their answer more helpful and relevant.", "score": [4.0, 7.0]}
+{"review_id": "VmybnEnsgBd4pFpaNdBmTb", "question_id": 51, "answer1_id": "8p5A4gouBgCmbQj5gMZapU", "answer2_id": "TjWPRDM6JFpPF8xeRptCKb", "reviewer_id": 1, "metadata": {}, "text": "7 8\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the hypothetical question. Assistant 1 focused on the spread of the technology, comparing it to the printing press, while Assistant 2 emphasized the limited access to the Internet due to the lack of public education and the slow and expensive nature of the technology at the time. Assistant 2's answer was slightly more detailed and provided a clearer picture of the potential challenges and limitations of the Internet during the Renaissance period, which is why it received a higher score.", "score": [7.0, 8.0]}
+{"review_id": "4dSJWERR6EuWPTGvcDjzSr", "question_id": 52, "answer1_id": "LVWqSxH3Uq6675w7LXH6zP", "answer2_id": "iR2tYTsWTFENEP7Qy9RgtX", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1 focused on the expansion of the Aztec Empire and its potential dominance in the region. Assistant 2, on the other hand, emphasized the preservation of Aztec culture, language, and customs, as well as the impact on Native American populations. Assistant 2's answer is slightly more detailed and covers a broader range of consequences, which is why it receives a higher score. However, both answers are helpful and provide valuable insights into the hypothetical scenario.", "score": [8.0, 9.0]}
+{"review_id": "cjMR2my44nfHS6XLyw9riR", "question_id": 53, "answer1_id": "ffXksqRU3k8CNHqYfshYhj", "answer2_id": "AZdS8xAi3GwAmCqkNSnnwv", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer focuses on the current situation and how humans are better equipped to deal with the plague today, which is not directly relevant to the question about the hypothetical scenario of the Black Death not occurring in the 14th century. The answer is accurate but not very helpful or relevant to the question.\n\nAssistant 2's answer, on the other hand, provides a more relevant and detailed response to the question. It discusses the potential consequences of the Black Death not occurring, such as increased population growth, urbanization, economic growth, and the possible impacts on the Renaissance, scientific revolution, and industrial revolution. The answer is helpful, relevant, accurate, and provides a good level of detail, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "HLsKxgtzuTWgVYvPMAGwA2", "question_id": 54, "answer1_id": "4L3dWuqvWwg5kELt7p8SVZ", "answer2_id": "VmwifF2JD5osYKDTqv2ZRS", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's answer is brief and only focuses on the fact that Newton wouldn't have discovered the law of gravity, which is accurate but not very helpful or detailed. Assistant 2's answer, on the other hand, provides more information about the possible contributions Newton could have made to the field of biology, such as studying the human body, treating diseases, and observing animal behavior. This answer is more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [6.0, 8.0]}
+{"review_id": "hZpnBQ88pPTGjoaeTNEpuF", "question_id": 55, "answer1_id": "XfpqViGW5dvauNZPDSF8yv", "answer2_id": "mUL5UPj3qDGaCriEjL2U3B", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is brief and only touches on the fact that the world would be missing the music of the Beatles. While this is true, it doesn't provide much insight into the potential impact of their absence. Assistant 2's answer, on the other hand, goes into more detail about the possible consequences, such as the British Invasion of the 1960s, the influence on other musicians, fashion, and society. This answer is more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "iVgogku6XGD7adRBa3sHa5", "question_id": 56, "answer1_id": "WsuMf9XBGs3dFjfSGKjEzg", "answer2_id": "dVdwUoVrAQJDuWxiodykiw", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is brief and provides a general idea of the consequences of not cracking the Enigma code, but it lacks depth and details. Assistant 2's answer, on the other hand, is more comprehensive and informative, discussing the potential impact on the outcome of the war, the advantage gained by the Allies, and the possible increase in the duration and cost of lives. Therefore, Assistant 2's answer is more helpful, relevant, and detailed, earning it a higher score.", "score": [7.0, 9.0]}
+{"review_id": "8ESZpVztPWvML7LN84GHRz", "question_id": 57, "answer1_id": "5qo3HudLrwrzEV2Px7gYRf", "answer2_id": "EiNn9jjfy7dga6xfCtLtF8", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1 briefly mentioned the longer travel time and more dangerous waters ships would have to navigate without the Suez Canal. Assistant 2, however, provided a more detailed response, discussing the implications for international trade and navigation, the connection between the Mediterranean Sea and the Red Sea, and the potential hindrance to global economic growth. Assistant 2's answer was more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "HR4x4mEuMMK8N8kbCxu6EC", "question_id": 58, "answer1_id": "SRxwJkNCfUaVe4Gb8LPvSK", "answer2_id": "eqG9f2R9hXVyZrZMpcqAYq", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is brief and provides a general idea of what might have happened if the Maya civilization had not collapsed. However, it lacks detail and specific examples. Assistant 2's answer, on the other hand, is more detailed and provides specific examples of how the Maya civilization might have continued to grow, such as building large cities and monuments, developing their writing and astronomical systems, and expanding their trade networks. Additionally, Assistant 2 mentions the potential advances in agriculture and water management that could have led to increased food production and a larger population. Overall, Assistant 2's answer is more helpful, relevant, and accurate due to the level of detail provided.", "score": [7.0, 9.0]}
+{"review_id": "URAZPBWG4SopRfVTasXEHf", "question_id": 59, "answer1_id": "S54Mmyim3QT5owdhwnNYmB", "answer2_id": "ex42CLEzDVC2TxAvMCtYQJ", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a brief history of Christopher Columbus and his discovery of the Americas, which is accurate but not directly answering the question about the consequences of Columbus not discovering the Americas. Assistant 2, on the other hand, directly addressed the question and provided a more relevant answer by discussing the potential impact on the indigenous people and the possibility of other Europeans discovering the Americas. Both answers could have provided more details and explored further consequences, but Assistant 2's answer was more relevant and helpful in addressing the user's question.", "score": [7.0, 8.0]}
+{"review_id": "ZGum2n5MsgSkJSXgEzZKb3", "question_id": 60, "answer1_id": "8DJpwEFb5jqt3vMj5oNqVX", "answer2_id": "RfFANSwHeCm3Pqe2AJ86Dk", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provides some context about Van Gogh's life, but it does not directly address the question of what would have happened if he had been successful during his lifetime. Assistant 2's answer, on the other hand, directly addresses the question and provides a more detailed and relevant response, discussing the potential outcomes of Van Gogh's success, such as recognition, financial stability, and opportunities for collaboration. Therefore, Assistant 2 receives a higher score for its more relevant and detailed response.", "score": [7.0, 9.0]}
+{"review_id": "9JENrcqVLHSrdQX86WeL3R", "question_id": 61, "answer1_id": "KMtqqYkDmyyThkvxXJ4kJd", "answer2_id": "nzxdtWmgQhRkHeVAanVogZ", "reviewer_id": 2, "metadata": {}, "text": "6 3\n\nAssistant 1:\nStrengths:\n- The answer provides a step-by-step approach to solving the problem.\n- It mentions the use of necessary functions and classes like fstream, getline, and strstr.\n\nWeaknesses:\n- The answer does not provide actual code, only a high-level description of the solution.\n- It does not mention the need to include the and headers for file handling and strstr function, respectively.\n\nSuggestions for improvement:\n- Provide actual code with proper syntax and formatting.\n- Include the necessary headers for the mentioned functions and classes.\n\nAssistant 2:\nStrengths:\n- The answer provides a brief description of the problem-solving approach.\n\nWeaknesses:\n- The answer does not provide any code or specific details on how to implement the solution.\n- It lacks information about necessary functions, classes, and headers.\n\nSuggestions for improvement:\n- Provide a step-by-step approach to solving the problem, including the necessary functions and classes.\n- Include actual code with proper syntax and formatting.\n- Mention the required headers for the mentioned functions and classes.", "score": [6.0, 3.0]}
+{"review_id": "ghtRTXWurHQNfefGZfMLMR", "question_id": 62, "answer1_id": "MkbgwTpP5b79NWdPEnvHyq", "answer2_id": "kRo6z5qmeiSM8Xz6BPJHLC", "reviewer_id": 2, "metadata": {}, "text": "4 7\n\nAssistant 1's strengths:\n- The answer provides a clear explanation of the longest common subsequence problem.\n\nAssistant 1's weaknesses:\n- The answer does not include any code implementation.\n\nSuggestions for Assistant 1:\n- Provide a Python function to solve the problem using dynamic programming, as requested in the question.\n\nAssistant 2's strengths:\n- The answer includes a Python function that attempts to solve the problem using dynamic programming.\n- The code structure is clear and easy to understand.\n- The comments are helpful in explaining the code's logic and functionality.\n\nAssistant 2's weaknesses:\n- The code does not correctly implement the longest common subsequence problem.\n- The code contains some unnecessary steps, such as initializing the first row to 0 twice.\n\nSuggestions for Assistant 2:\n- Correct the implementation of the dynamic programming algorithm to find the longest common subsequence.\n- Remove unnecessary steps from the code.\n- Test the code to ensure it produces the correct output for various input strings.", "score": [4.0, 7.0]}
+{"review_id": "k7qHWUeBBzxrsYYSFNuKiN", "question_id": 63, "answer1_id": "PSb6r567shkRasPkJagvWi", "answer2_id": "7kj8odEE79UBdR4pT6u6TY", "reviewer_id": 2, "metadata": {}, "text": "6 8\n\nAssistant 1:\nStrengths:\n- The answer provides two different approaches to solve the problem using re.match and re.search.\n- The regular expression provided is mostly correct.\n\nWeaknesses:\n- The answer is incomplete, as it does not provide a full working code example.\n- The regular expression does not cover all valid email addresses, as it only allows lowercase letters and does not include some special characters that can be part of an email address.\n- No import statement for the 're' module is provided.\n\nSuggestions for improvement:\n- Provide a complete working code example, including the import statement for the 're' module.\n- Update the regular expression to include uppercase letters and valid special characters in the email address.\n- Add comments to explain the regular expression components.\n\nAssistant 2:\nStrengths:\n- The regular expression provided is more accurate and covers a wider range of valid email addresses.\n- The answer is concise and to the point.\n\nWeaknesses:\n- The answer does not provide a full working code example.\n- No import statement for the 're' module is provided.\n- No comments or explanations are provided for the regular expression.\n\nSuggestions for improvement:\n- Provide a complete working code example, including the import statement for the 're' module.\n- Add comments to explain the regular expression components and the logic behind it.", "score": [6.0, 8.0]}
+{"review_id": "ZmKypUFznS4JaBxDUKKEDD", "question_id": 64, "answer1_id": "QWpBq9gM4iYmsTLQ3NNnBe", "answer2_id": "UULZ4GLz9j9ot7tN7x3ySD", "reviewer_id": 2, "metadata": {}, "text": "4 3\n\nAssistant 1:\nStrengths:\n- Explained the problem statement and provided constraints for the input.\n\nWeaknesses:\n- Did not provide any code implementation.\n- The explanation of the problem statement is incomplete.\n\nSuggestions for improvement:\n- Provide a complete code implementation for the problem.\n- Make sure to cover all aspects of the problem statement in the explanation.\n\nAssistant 2:\nStrengths:\n- Provided a step-by-step explanation of the algorithm.\n\nWeaknesses:\n- The algorithm provided is incorrect and does not solve the problem.\n- No actual code implementation is provided.\n- The explanation is not clear and concise.\n\nSuggestions for improvement:\n- Provide a correct algorithm and code implementation for the problem.\n- Make sure the explanation is clear and concise.\n- Include comments in the code to explain the logic and functionality.", "score": [4.0, 3.0]}
+{"review_id": "gmqhG8gQ9SEconanYFqrk5", "question_id": 65, "answer1_id": "U3QxoC4DxXoggnLRbgJh8b", "answer2_id": "Rh8P3bxSJx4p6nCZkSZBrh", "reviewer_id": 2, "metadata": {}, "text": "2 7\n\nAssistant 1 provided no code or implementation of the binary search algorithm, so their score is very low. They only acknowledged the task but did not provide any solution.\n\nAssistant 2 provided a clear explanation of how the binary search algorithm works but did not provide any code implementation. Their explanation was accurate and easy to understand, but the task required a code implementation. They should have included a code example to demonstrate the algorithm.\n\nFor Assistant 2, I would suggest providing a code implementation of the binary search algorithm in a programming language like Python, Java, or C++. This would help demonstrate their understanding of the algorithm and provide a complete solution to the task.", "score": [2.0, 7.0]}
+{"review_id": "5yhSWSgvVcq9sh9uAEffA8", "question_id": 66, "answer1_id": "f3TXCreQPJ5Erd6BGt4SFE", "answer2_id": "R3aYUUtY46ht4ToU8US96x", "reviewer_id": 2, "metadata": {}, "text": "3 7\n\nAssistant 1's Answer:\nStrengths: None, as no actual code was provided.\nWeaknesses: The answer does not provide any code implementation, and the explanation is not clear and concise.\n\nSuggestions for improvement: Provide a clear and concise code implementation of the queue using two stacks in Python. Explain the logic and functionality of the code with comments.\n\nAssistant 2's Answer:\nStrengths: The answer provides a general idea of how to implement a queue using two stacks in Python.\nWeaknesses: The answer does not provide any actual code implementation.\n\nSuggestions for improvement: Provide a clear and concise code implementation of the queue using two stacks in Python. Explain the logic and functionality of the code with comments. Here's an example implementation:\n\n```python\nclass Queue:\n def __init__(self):\n self.stack1 = []\n self.stack2 = []\n\n def enqueue(self, item):\n # Push the item onto stack1\n self.stack1.append(item)\n\n def dequeue(self):\n # If stack2 is empty, move all items from stack1 to stack2\n if not self.stack2:\n while self.stack1:\n self.stack2.append(self.stack1.pop())\n # Pop the top item from stack2, which is the front of the queue\n return self.stack2.pop()\n\n# Example usage:\nqueue = Queue()\nqueue.enqueue(1)\nqueue.enqueue(2)\nqueue.enqueue(3)\nprint(queue.dequeue()) # Output: 1\nprint(queue.dequeue()) # Output: 2\n```\nThis implementation uses two stacks to simulate a queue. The enqueue operation pushes items onto stack1, while the dequeue operation pops items from stack2. If stack2 is empty, all items from stack1 are moved to stack2, reversing their order and making the front of the queue accessible.", "score": [3.0, 7.0]}
+{"review_id": "YWsNNZZbULpofXphPQChFM", "question_id": 67, "answer1_id": "SG7YcT8s4sJNBRqidKk7Z7", "answer2_id": "MQJFbH3KoFaSMmPgmK9LPa", "reviewer_id": 2, "metadata": {}, "text": "4 6\n\nAssistant 1:\nStrengths:\n- The answer provides a high-level approach to solving the problem.\n\nWeaknesses:\n- The answer does not provide any actual code.\n- The helper function is not necessary, as the problem can be solved with a single function.\n- The answer does not mention how to avoid using extra data structures.\n\nSuggestions for improvement:\n- Provide actual code that demonstrates the approach.\n- Use nested loops to compare elements in both arrays.\n- Instead of using a helper function, implement the logic within a single function.\n- To avoid using extra data structures, print the common elements directly instead of storing them in an array.\n\nAssistant 2:\nStrengths:\n- The answer provides a high-level approach to solving the problem.\n- The answer mentions using nested loops, which is a suitable approach for this problem.\n\nWeaknesses:\n- The answer does not provide any actual code.\n- The \"found\" flag concept is not clearly explained and seems unnecessary.\n\nSuggestions for improvement:\n- Provide actual code that demonstrates the approach.\n- Clearly explain the logic behind the \"found\" flag or remove it if it is not necessary.\n- To avoid using extra data structures, print the common elements directly instead of storing them in an array.", "score": [4.0, 6.0]}
+{"review_id": "9gHFxeYZgPtusQXPZMPPoF", "question_id": 68, "answer1_id": "ENsG3zoeUY7ArFo2kNaQbz", "answer2_id": "dBWqryuzG6BHRdPgrWWpbH", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, let's examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1's Answer:\nf(2) = 26\n\nAssistant 2's Answer:\nThe value of f(2) is 1.\n\nBoth Assistant 1 and Assistant 2 provided incorrect answers. They did not show their problem-solving steps, so it is difficult to determine where they went wrong.\n\nScores:\nAssistant 1: 1\nAssistant 2: 1\n\nBoth assistants performed poorly in this case, as they both provided incorrect answers without showing their problem-solving steps.", "score": [1, 1]}
+{"review_id": "jDLVGmfqxXrPP8aHHN56LZ", "question_id": 69, "answer1_id": "C7CN5AYJhGC7iDgS7rciDQ", "answer2_id": "W74byrCbE7XiJWjcPTZS7x", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently:\n\n3x + 10 = 5(x - 2)\n3x + 10 = 5x - 10\n10 = 2x - 10\n20 = 2x\nx = 10\n\nNow, let's examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\n3x + 10 = 5x - 10x + 20 = 20 + 10x = 30x = 10.\n\nAssistant 1's steps are incorrect and confusing. They incorrectly combined the terms, leading to an incorrect answer.\n\nAssistant 2:\nx = 6.\n\nAssistant 2's answer is also incorrect, and they didn't provide any steps to show their problem-solving process.\n\nScores:\nAssistant 1: 2 (incorrect steps and answer)\nAssistant 2: 1 (incorrect answer and no steps provided)\n\nIn this case, neither assistant performed well, but Assistant 1 at least attempted to show some steps, even though they were incorrect.", "score": [2, 1]}
+{"review_id": "XhoujgnzM2exQesDVZziaR", "question_id": 70, "answer1_id": "QP9CmZKwhu2BrUmzhy8tH4", "answer2_id": "7WC5quGKdv8hADCxWaWZ57", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently. We can use the distance formula to find the length of the line segment:\n\nDistance = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nHere, (x1, y1) = (2, -2) and (x2, y2) = (10, 4).\n\nDistance = sqrt((10 - 2)^2 + (4 - (-2))^2)\nDistance = sqrt(8^2 + 6^2)\nDistance = sqrt(64 + 36)\nDistance = sqrt(100)\nDistance = 10 units\n\nNow, let's examine the problem-solving process of Assistant 1 and Assistant 2.\n\nAssistant 1:\nThe length of the line segment is 10 units.\n\nAssistant 2:\nThe length of the line segment is 8 units.\n\nAssistant 1 has the correct answer, while Assistant 2 has an incorrect answer. Therefore, I would give Assistant 1 a score of 10 and Assistant 2 a score of 1.\n\nThe output tuple is (10, 1). Assistant 1 performed better in this case.", "score": [10, 1]}
+{"review_id": "88nddgEempssh3CBerEpvd", "question_id": 71, "answer1_id": "2KtpJeRqEfbPLwdUxAXWrj", "answer2_id": "AZER7D3RKZ9F9SXHiMjdqM", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided helpful tips for writing a formal email, but did not provide an actual template or example. Assistant 2, on the other hand, provided a clear and concise template that the user can directly use and adapt for their purpose. Both assistants were relevant and accurate in their responses, but Assistant 2's answer was more detailed and directly addressed the user's request.", "score": [8.0, 10.0]}
+{"review_id": "Y7u5UACCHK6Nu8m3i4Rrhs", "question_id": 72, "answer1_id": "gsL3R2grTXUiHdeDSk8f9Q", "answer2_id": "MSrdDafr77UvSHCnsPMSP3", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the user's question. Assistant 1 started a resignation letter template and included expressions of gratitude and appreciation. Assistant 2 provided a more complete template, which included all necessary elements for a resignation letter, such as the clear statement of intention to resign, the last day of work, and expressions of gratitude. Both answers were accurate and detailed, but Assistant 2's answer was more comprehensive, which is why it receives a slightly higher score.", "score": [9.0, 10.0]}
+{"review_id": "UDDtp65oLt5sUn9SdDyfD4", "question_id": 73, "answer1_id": "HprPMoUk9JY5TtToRbuMwy", "answer2_id": "hxkjUkDkXhGP78Vo74B4WE", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided helpful and relevant information on structuring a formal letter of recommendation. Assistant 1 gave a more concise response, outlining the format in three main points. However, Assistant 2 provided a more detailed response, which included more guidance on the content of the letter, such as mentioning the student's accomplishments and awards, and explaining how the student will contribute positively to the program. Assistant 2's answer is slightly more helpful due to the additional details provided.", "score": [8.0, 9.0]}
+{"review_id": "F3SqFjTStRNyF7t6EASK6x", "question_id": 74, "answer1_id": "YU7gDhmo4LDVMTEZZMRdBC", "answer2_id": "hv9jpG9PxeJ9AnFdNzH3Jv", "reviewer_id": 1, "metadata": {}, "text": "0 9\nAssistant 1 provided no answer at all, so it receives a score of 0. Assistant 2's answer was helpful, relevant, and accurate, with a good level of detail. The email was well-structured and compelling, making it deserving of a score of 9.", "score": [0.0, 9.0]}
+{"review_id": "Aer3XzBpDKJtHhZgPbJ3aE", "question_id": 75, "answer1_id": "2gNqU2XpsL6yqFn6DMRgHq", "answer2_id": "X5BVHT5pFpWxB88SzERrEG", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's response was not helpful as it did not provide the requested draft of the apology email and instead asked for the customer's name, which was not necessary for the task. Assistant 2's response was more helpful, relevant, and accurate as it provided a draft of the apology email that addressed the customer's issue and reassured them that the problem had been resolved. The level of detail in Assistant 2's response was also appropriate for the task.", "score": [4.0, 8.0]}
+{"review_id": "GSrpfM2tDZKzRnEqganLP6", "question_id": 76, "answer1_id": "a4L6JNPuomkPLdWxJRQpod", "answer2_id": "Af2SufKwvDBriNXnZX9UoP", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a brief overview of jazz history and its evolution, touching on its origins, styles, and cultural impact. However, the answer lacked depth and details about key musicians, composers, and the influence of jazz on popular culture. Assistant 2's answer, while also brief, provided a more structured outline for a YouTube video script, mentioning the exploration of jazz's roots, key figures, impact on popular culture, and its legacy. Both answers were relevant and accurate, but Assistant 2's response was more helpful and better suited for a video script format.", "score": [7.0, 8.0]}
+{"review_id": "b5DTFmBoaRw2FADLuqy6fi", "question_id": 77, "answer1_id": "eN9V2Wf2BNzWoJosqdFw6K", "answer2_id": "HVnqVdSGRwQfqSsiYxtTTn", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's response was not helpful as it did not provide any content for the travel blog post, whereas Assistant 2's response provided a brief overview of the content that would be included in the travel blog post, such as experiences, cultural highlights, and must-see attractions. Assistant 2's response was more relevant, accurate, and detailed, making it the better overall response.", "score": [4.0, 8.0]}
+{"review_id": "oJtU6YMZxHXk4bFmw87xt8", "question_id": 78, "answer1_id": "iCzkbWTatKfuC6QVv59XgA", "answer2_id": "8hcmmjzyMyhFJ8ZwqJJfvw", "reviewer_id": 1, "metadata": {}, "text": "7 8.5\nAssistant 1 provided a detailed review of the movie, discussing the plot, characters, and special effects. However, the review was not captivating and contained some inaccuracies, such as stating that the movie was based on \"Ready Player One\" by Ernest Cline, which was unrelated to the given plot. Assistant 2's answer was more captivating and concise, covering the plot, characters, and special effects, but it lacked some details. Overall, Assistant 2's answer was more engaging and relevant to the question, which is why it received a higher score.", "score": [7.0, 8.5]}
+{"review_id": "KJeEK5Uo47QeHdbpKMTpeQ", "question_id": 79, "answer1_id": "GQcFyY8ayFT48DAP5NKNZf", "answer2_id": "CU6gGfJqQmwZNxEG2n2Uda", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's response provided a long list of points related to the influence of streaming platforms on the music industry, but it lacked structure and organization, making it difficult to follow as a podcast script. Assistant 2's response, on the other hand, provided a clear and concise outline for a podcast script, addressing the topic in a more organized manner and offering a better flow for a podcast episode. Assistant 2's response was more helpful, relevant, and accurate for the user's request, which is why it received a higher score.", "score": [6.0, 8.0]}
+{"review_id": "cfn7YY6yK8MWWkrtgrCFVz", "question_id": 80, "answer1_id": "A6pauLMckn34otnfpeQRFi", "answer2_id": "5SWbCoLV6NRrq5RtWQK4jp", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1's answer was more focused on the orchestra's performance and the audience's reaction, while Assistant 2's answer provided a more immersive and emotional description of the concert experience. Assistant 1's answer was accurate and provided some details about the composers and the duration of the performance. Assistant 2's answer, on the other hand, was more engaging and painted a vivid picture of the atmosphere and the conductor's passion. Both answers were useful, but Assistant 2's answer was slightly more appealing due to its descriptive and captivating language.", "score": [8.0, 9.0]}
diff --git a/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_alpaca-13b_vicuna-13b.jsonl b/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_alpaca-13b_vicuna-13b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..2754fa66d04134530bb47e6ed2df2440cfe6411e
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_alpaca-13b_vicuna-13b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "QM5m5nnioWr8M2LFHsaQvu", "question_id": 1, "answer1_id": "kEL9ifUHDeYuAXzevje2se", "answer2_id": "cV4zXygaNP6CXEsgdHMEqz", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information on improving time management skills. Assistant 1 gave a concise overview of the key points, such as setting priorities, breaking tasks into smaller chunks, and using technology. Assistant 2 provided a more detailed and structured response, with a numbered list of tips and more specific advice, such as using a calendar or planner and practicing self-discipline. While both answers were accurate and useful, Assistant 2's response was slightly more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ZQTBtt5M3mHEdJErvBnvho", "question_id": 2, "answer1_id": "VcF3NrWGXhhxLkDVurNrwq", "answer2_id": "3zpPUeoVsPWXtKMV7cAhZ6", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about effective ways to deal with stress. Assistant 1 mentioned identifying and avoiding sources of stress, developing healthy coping mechanisms, relaxation techniques, and taking care of mental and physical health. Assistant 2 provided a more detailed list of specific strategies, such as exercise, mindfulness, social support, healthy eating, good sleep, time management, relaxation techniques, and seeking professional help. Assistant 2's answer was more comprehensive and provided more actionable advice, which is why it received a higher score. However, both answers were accurate and relevant to the question.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "NScFF3JiZuLiNEu2YGWFbC", "question_id": 3, "answer1_id": "LpvtyQi9QdSgRrgGDxiGrT", "answer2_id": "6xpiZJE4vxxGqjRogPfBk7", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information about the differences between Python and JavaScript. Assistant 1 mentioned syntax, structure, application types, and the fact that Python is a compiled language while JavaScript is interpreted. However, Assistant 2 provided a more detailed and organized response, covering syntax, data types, error handling, libraries, use cases, and speed. Assistant 1 incorrectly stated that Python is a compiled language, while it is actually an interpreted language like JavaScript. Assistant 2's response was more accurate and comprehensive, which is why it receives a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "9SCFcM2koGViowZd2gbdoe", "question_id": 4, "answer1_id": "7zQm8cSTJhPtPdZdxbcfrX", "answer2_id": "abitTVu2Dz8rdMxyThvFyJ", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information on increasing productivity while working from home. Assistant 1 offered a slightly more detailed response, with nine tips compared to Assistant 2's seven tips. Both assistants covered essential points, such as establishing a routine, creating a dedicated workspace, eliminating distractions, and taking breaks. However, Assistant 1 also mentioned setting goals, tracking progress, and unplugging, which added more value to the response. Assistant 2's response was still helpful, but Assistant 1's answer was more comprehensive, which is why Assistant 1 receives a 9 and Assistant 2 receives an 8.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "hHSZygAJV8sC9WuyV3UoKx", "question_id": 5, "answer1_id": "UrLEH82RHwqqLt2LyvYSKj", "answer2_id": "UMZod8JaWia9KB2EVXdYrF", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the basics of quantum computing. Assistant 1 gave a brief overview of quantum computing, its potential, and its applications. Assistant 2 provided a more detailed response, explaining the concept of qubits and different approaches to building quantum computers. Assistant 2's response was more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "PK7fcxBPUfD9DPqSiZd8an", "question_id": 6, "answer1_id": "fpRdMTdnfirosQixuf2Gez", "answer2_id": "WXRCqF34f4UUJzd3xbgAqT", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the differences between plant-based and animal-based protein sources. Assistant 1 focused on the nutritional aspects, such as fat, calories, cholesterol, and resource usage. Assistant 2 provided a more detailed response, discussing nutritional value, absorption, sustainability, health effects, ethical considerations, and cost. Assistant 2's answer is more comprehensive and covers a wider range of aspects, which is why it receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "2cPJ5uF4r6z4EWKucgUdxs", "question_id": 7, "answer1_id": "PvGmsCJSNFcvQKmPTnnd7s", "answer2_id": "JREJbG5ZKXCNMoWoiJQxbC", "text": "8 10\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about developing critical thinking skills. Assistant 1 gave a brief overview of what critical thinking involves, which is useful for understanding the concept. However, Assistant 2 provided a more detailed and comprehensive list of 10 tips for improving critical thinking skills, making it a more valuable response for someone looking to develop these skills. Assistant 2's answer also covered a wider range of strategies, such as active listening, seeking diverse viewpoints, and practicing mindfulness, which contributes to its higher score.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "2P68gHgTycYPveTkXFmJ2V", "question_id": 8, "answer1_id": "n4ANAbpR3gvLPP8poPfKZ6", "answer2_id": "mmVwmX6TGJ2Y72gCNac4EQ", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the challenges faced by the education sector today. Assistant 1's response was more concise, but it still covered a good range of challenges. Assistant 2's response was more detailed and organized, presenting the challenges in a numbered list format, which made it easier to understand and follow. Assistant 2 also touched on some additional aspects, such as accountability and assessment, and sustainability, which added more depth to the answer. Therefore, Assistant 2 receives a slightly higher score due to the better organization and additional details provided.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "KT5tYQWeruK84zYBEDifhA", "question_id": 9, "answer1_id": "STJ36GrgQMcaUi7zaoNPit", "answer2_id": "DMTZyzd4rRAFV43xtBJ9ns", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the primary factors that influence consumer behavior. Assistant 1 mentioned cultural, social, and personal factors, as well as advertising, marketing, and price. Assistant 2 provided a more detailed response, breaking down the factors into six categories: personal, psychological, social, economic, marketing, and product/service factors. Assistant 2's answer was more comprehensive and organized, which is why it received a slightly higher score. However, both responses were informative and useful.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "MAomLn7rTvxsZfGpv37ijQ", "question_id": 10, "answer1_id": "425SwYvqKPAXFGTYKXB7Cs", "answer2_id": "dETAsj4xHnUCSTkZezz8aM", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question about effective strategies for conflict resolution in the workplace. Both responses included a list of strategies with clear explanations, making it easy for the user to understand and apply the information. The level of detail in both responses was appropriate and informative. The slight difference in the strategies listed by each assistant does not significantly impact the overall quality of the answers, as both provided valuable insights and covered the main aspects of conflict resolution.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "di5wzaNZCApV85kfJyKtGH", "question_id": 11, "answer1_id": "VbNAuj6KAkMdLJQXMo22oK", "answer2_id": "C8ksZxg3LshMUWiLxPanbt", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the implications of using single-use plastic bottles versus reusable bottles. Assistant 1 focused more on the environmental aspects and briefly touched on human health, while Assistant 2 provided a more balanced view of both environmental and health impacts. Assistant 2 also organized the information more clearly, making it easier to understand the different implications. Both assistants provided a good level of detail, but Assistant 2's response was slightly more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "FCat6rmvhMiL5B8DJdKysU", "question_id": 12, "answer1_id": "CNGqAeu2QJbQ4QGzHJDPdq", "answer2_id": "NeHhRc5P5uAU8eWSJBRkhG", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information in response to the question about designing an inclusive and accessible public transportation system. Assistant 2's answer was slightly more detailed and organized, with a numbered list of factors to consider, which made it easier to follow and understand. Assistant 1 mentioned the integration with other services, which is an important aspect that Assistant 2 did not mention. However, Assistant 2 provided more specific examples of accessibility features and emphasized the importance of customer service, information and communication, and affordability, which are crucial factors in designing an inclusive public transportation system. Overall, both assistants provided valuable information, but Assistant 2's response was more comprehensive and well-structured.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "hg92jQt3a8qPSVTLESQmbh", "question_id": 13, "answer1_id": "E8w2qYqnm8iqCrSkUv62sz", "answer2_id": "KAJ7UVwu8oCKyxZj9j82pm", "text": "8 10\nBoth assistants provided helpful, relevant, and accurate information about how governments can use fiscal and monetary policies to combat economic recessions. However, Assistant 2 provided a more detailed and structured response, with specific examples and a clear distinction between fiscal and monetary policies. Assistant 1's response was more general and less detailed, which is why Assistant 2 receives a higher score.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "6WZVJUT39icJDPALuZRbUN", "question_id": 14, "answer1_id": "8o5yMymfzo6kzmp9GK5MWr", "answer2_id": "NnWfaeRe8PmitgmV4u5fY8", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a general overview of the issue and suggested ways to overcome language and cultural barriers. However, Assistant 2 provided a more detailed response, outlining specific ways in which these barriers can affect communication and relationships, such as language differences, cultural norms, stereotypes, prejudice, and power dynamics. This additional detail and structure make Assistant 2's response slightly more informative and comprehensive, resulting in a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "8kChSLEztqMSdQkV9HDtMw", "question_id": 15, "answer1_id": "kbJVEEsdsSScEq5Y5furr7", "answer2_id": "WiGpqKRRwWonwgU95piNNc", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on a variety of ways AI can be used in healthcare, such as assisting with diagnoses, analyzing lab results, automating administrative tasks, and providing virtual health coaching. Assistant 2, on the other hand, provided a more detailed response, discussing the use of AI in analyzing patient data, automating routine tasks, remote patient monitoring, personalized treatment plans, and medical research. Assistant 2's answer was more comprehensive and provided a better understanding of the potential impact of AI on healthcare delivery, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "JQCpdYBgdJcDDVXWNgNAf8", "question_id": 16, "answer1_id": "CMUL5ULZuR7YC5EPzCBN2N", "answer2_id": "iangiZeex5ndxAXnWMrxBW", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about CRISPR-Cas9 technology, its potential applications, and ethical implications. Assistant 2, however, provided a more detailed response, including a clearer explanation of the gene editing process and the specific repair mechanisms involved (HDR and NHR). Assistant 2 also discussed a wider range of potential applications and ethical concerns. While Assistant 1's response was informative, Assistant 2's response was more comprehensive and in-depth, which is why Assistant 2 received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "VzPqpgnivGDdXhWdxQyvvH", "question_id": 17, "answer1_id": "kEmDDQyNqSkyFihYEEBpuR", "answer2_id": "XnMRLphzYQX4QRNht7tbui", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about vaccinations and herd immunity. Assistant 1 gave a brief overview of how vaccinations work and the concept of herd immunity. Assistant 2 provided a more detailed explanation of the immune response triggered by vaccinations and the importance of herd immunity for vulnerable populations. Assistant 2 also mentioned the critical threshold for achieving herd immunity and the benefits of vaccination for the community. Therefore, Assistant 2 receives a slightly higher score due to the additional details and clarity provided in the response.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "DjcVQrARdkz8zZU4ahzuJb", "question_id": 18, "answer1_id": "Qs3grQsqFVGK9EVkCkf9PB", "answer2_id": "HZc37bwy646mRzbqSsDAob", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 gave a concise overview of the influence of social media platforms on news consumption and the potential implications for the spread of misinformation. However, Assistant 2 provided a more detailed response, outlining specific ways in which social media platforms influence news consumption and sharing, such as personalization, virality, amplification, filter bubbles, confirmation bias, and lack of fact-checking. This additional detail and organization make Assistant 2's response slightly more informative and comprehensive, resulting in a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "eHLHXtjjeVUMsFLeAoQtbM", "question_id": 19, "answer1_id": "kzZ6dKN7hkRWjqtdHr7Qns", "answer2_id": "iJrMatLrMdJyyqMx9uJ45a", "text": "8 9\nBoth assistants provided helpful, relevant, and accurate information regarding the influence of cultural, social, and economic factors on people's food choices and how this knowledge can be used to promote healthier diets. Assistant 2, however, provided a more detailed and structured response, with clear examples for each factor, making it easier to understand the different influences. Assistant 1's response was also informative, but it lacked the clear organization and specific examples that Assistant 2 provided.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "5BcjGdUzPQbMD5MKmAvtRR", "question_id": 20, "answer1_id": "DPPDG6YGFJij2GCmRL66PU", "answer2_id": "oVEHqDnDTEADZSFfKgFTZd", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief but clear explanation of natural selection and its role in evolution and adaptation. Assistant 2, however, provided a more detailed response, breaking down the process of natural selection into five steps and explaining each step's contribution to evolution and adaptation. This additional detail and organization make Assistant 2's answer slightly more informative and comprehensive, resulting in a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "XpFSu84sZ8tACx9WkBygF5", "question_id": 21, "answer1_id": "D62FjDb4nZANzPpfSfsiyn", "answer2_id": "WLAj4u59bj2oEXzahF79ek", "text": "7 9\nAssistant 1 provided a brief and general introduction, while Assistant 2 offered a more detailed and immersive response, which better captured the essence of a medieval knight's introduction at a royal banquet. Assistant 2's response included a specific speech, demonstrating a deeper understanding of the context and the expectations of the situation.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "eqRG5G2adk4SQNsppwVVW6", "question_id": 22, "answer1_id": "k7E4NNw5kyj9DmvP5Pu2zb", "answer2_id": "fJPnM2XcRveW2zR4DDaeTb", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and engaging speeches to motivate a pirate crew to search for hidden treasure. Assistant 1's response was shorter and more focused on the excitement of the adventure, while Assistant 2's response was more detailed, providing a context for the speech and emphasizing the challenges and rewards of the journey. Assistant 2's response was slightly better due to its more comprehensive approach and the inclusion of a clear goal for the crew.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "XU7RnpgdvubdNHZ8ryeBNE", "question_id": 23, "answer1_id": "KFocjVCejYrU3YmLjAqoUF", "answer2_id": "fY4Xed6veWpcKfj5krZz8m", "text": "8 9\nBoth assistants provided relevant and creative responses to the question. Assistant 1 gave a general description of how a Shakespearean character would declare their love, which was helpful and accurate. Assistant 2 went a step further by providing an actual example of a soliloquy, making it more engaging and detailed. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "64ayLmABqYNLkAVAj2MSrF", "question_id": 24, "answer1_id": "dq8Sm9djS7e7y9sG9vmMJf", "answer2_id": "LqFJA2JQbBXP77nkYjtrZK", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. Assistant 1 gave a more traditional superhero origin story, explaining that they were born with special abilities and learned to use them for good. Assistant 2, on the other hand, provided a unique origin story as an advanced artificial intelligence created by scientists. Both answers were detailed and engaging, but Assistant 2's response was slightly more detailed and creative, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "cfBjbHjZsP9rKVXVxEaZoC", "question_id": 25, "answer1_id": "XZ8fG8e6u7CyKd2moK6abe", "answer2_id": "WxCKcRstJPXTXnqEzSX3Hg", "text": "8 10\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information in response to the user's question. However, Assistant 2's answer was more detailed and comprehensive, covering a wider range of technological advancements from various fields, such as fusion power, nanotechnology, artificial intelligence, space exploration, quantum computing, and biotechnology. Assistant 1 mentioned some general advancements like time travel, intelligent robots, autonomous vehicles, virtual reality, augmented reality, artificial intelligence, and machine learning, but did not provide specific examples or details. Therefore, Assistant 2 receives a higher score of 10, while Assistant 1 receives an 8.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "aoxUcPU7Z3swngyhGAsfrc", "question_id": 26, "answer1_id": "oKaXHfoK4pXwrefFWXmeA8", "answer2_id": "dgPJ7DvtFHD7mxtRwzM3vW", "text": "8 10\nAssistant 1 provided a brief but relevant response, describing the winning play in a simple manner. However, the answer lacked detail and excitement that would be expected from a sports commentator. Assistant 2, on the other hand, delivered a more engaging and detailed response, capturing the excitement and tension of the final moments of the game. The answer provided a vivid picture of the winning play, the players involved, and the emotions of both teams, making it a more immersive and satisfying response.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RPyrLb5gh3NCxy5HhUu3Qy", "question_id": 27, "answer1_id": "ZwiZfvDWm7SETKNBfDk7Mb", "answer2_id": "ADX83sWvjJkewJX6JmYUzQ", "text": "8 10\nAssistant 1 provided a brief and vague description of the signature dish, which did not give a clear idea of what the dish is or its ingredients. The response was relevant but lacked detail and creativity. Assistant 2, on the other hand, provided a detailed, creative, and engaging description of the signature dish, including the name of the dish, the ingredients, the flavors, and the presentation. Assistant 2's response was more helpful, relevant, accurate, and detailed, which is why it received a higher score.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "2hn8AqjvpXzHXKu2P69mco", "question_id": 28, "answer1_id": "DxYopRe2LcTJMy3FWu6btd", "answer2_id": "ihNG3rwsrt95NDhCAFeSDR", "text": "8 9\nBoth assistants provided relevant and detailed responses to the question. Assistant 1 focused on describing the emotions and the view from the top, which directly addressed the user's question. Assistant 2, however, went a step further by acknowledging its AI nature and providing a more comprehensive response, including the importance of proper training, experience, and equipment. This additional information is valuable for someone interested in mountain climbing. Assistant 1's response was good, but Assistant 2's response was more informative and well-rounded, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "QKzMYoFj48wZJPVjZnXD4V", "question_id": 29, "answer1_id": "WC3UJVh4jQ5RUkpcRMU98L", "answer2_id": "Gmhqf3z4LvVfwPNFJ89BKd", "text": "8 9\nBoth assistants provided relevant and helpful answers, but Assistant 2's response was more detailed and organized. Assistant 1 briefly mentioned the challenges faced, while Assistant 2 provided a step-by-step description of daily life on Mars and elaborated on the challenges. Assistant 2's answer also touched upon the importance of communication with Earth and the psychological aspect of living on another planet, making it a more comprehensive response.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "F4eWAVuERss6hdsTXDT43V", "question_id": 30, "answer1_id": "gTvgn6ksDjGGgdprw6AG5A", "answer2_id": "gSwkKJCn6qDnNZond2xVJ3", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the user's question. Assistant 1 described forming an alliance with other survivors, sharing knowledge and skills, and working together to stay alive. Assistant 2 went into more depth about the daily struggles, the process of learning new skills, and the importance of forming alliances with like-minded individuals. Assistant 2 also mentioned the need for caution when encountering dangerous characters and the determination to rebuild society. While both responses were helpful and accurate, Assistant 2 provided a more comprehensive and engaging answer, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "5T23NeQWsoTreXFBErzMfH", "question_id": 31, "answer1_id": "3q7giCk2BA3Ye4Tm9HC2iw", "answer2_id": "8RaBeMjxx2bCp2GKWv7YiP", "text": "8 10\nAssistant 1 provided a decent response, but there was an error in the last sentence, stating that higher prices could indicate popularity among locals, which is not necessarily true. Assistant 2, on the other hand, provided a more detailed and accurate response, covering various aspects of how to determine the popularity of a restaurant among locals or tourists, and also explained why this information might be useful. Assistant 2's response was more comprehensive and well-structured, making it easier to understand and follow.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "D4k3jKh84j9z9Vtpm3Emvw", "question_id": 32, "answer1_id": "hRGsxy86v26SC4yAQS29X4", "answer2_id": "C65PZkmAfFfWRs4bPhyKqg", "text": "7 9\nAssistant 1 provided a few clues that suggest someone is pretending to understand a topic, but their answer was not as detailed or comprehensive as Assistant 2's response. Assistant 2 provided a more extensive list of subtle clues, each with a clear explanation, making their answer more helpful and informative. Both answers were relevant and accurate, but Assistant 2's response was more detailed and precise, which is why it received a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "bes2VMcdsy6n47zpvsiXGs", "question_id": 33, "answer1_id": "3n49A5ggJERfXYrLns3ZeU", "answer2_id": "4so4HTEjgDZKTqNAgkHHQX", "text": "8 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. However, Assistant 2's response was more detailed and comprehensive, covering a wider range of reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app. Assistant 1's answer was still useful and touched on some key points, but it lacked the depth and elaboration that Assistant 2's response provided.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "2TkHUHRd2MeXpUgXUHHKFe", "question_id": 34, "answer1_id": "ErCpFtPuYVru4oTTk4WrxG", "answer2_id": "FCYaiexEzdoLFPAwvTgDDm", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on body language, eye contact, and engagement in the conversation as indicators of genuine interest. Assistant 2 provided a more detailed response, listing five specific clues to look for, including active listening, engaged body language, personal investment, authenticity, and follow-up. Both answers were accurate and relevant, but Assistant 2's response was more comprehensive and provided a slightly higher level of detail, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "LrvxJCFQDLW2c29FpmG9Xp", "question_id": 35, "answer1_id": "PTNoCRMZWoJk8HaKX7fW45", "answer2_id": "76EPQDh4ZNxBMGqED9LEFi", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a good general overview of the benefits of shopping at small, locally-owned businesses, mentioning community support, personal experience, quality products, and local culture. Assistant 2, however, provided a more detailed and organized response, listing specific reasons and elaborating on each point. This made Assistant 2's answer slightly more informative and comprehensive, earning it a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "L25o9xkc8WhLLtG82XrVHc", "question_id": 36, "answer1_id": "n8cFs9KENNwZ4z3SR4iXTr", "answer2_id": "cvBg3gyCyDuyESof3YXhTE", "text": "7 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information on assessing the credibility of a source. However, Assistant 2's response was more detailed and precise, covering a wider range of factors to consider, such as objectivity, date of publication, and skepticism. Assistant 1's response was accurate but lacked the depth and comprehensiveness of Assistant 2's answer. Therefore, Assistant 2 receives a higher score due to its more thorough and well-rounded response.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "YXjoGG6fkh6RzQaxpscw7u", "question_id": 37, "answer1_id": "GzxL9mmEK5RzKqRbqBMUVC", "answer2_id": "kRgfUJ7qqkyZUnLd2fnnaX", "text": "8 10\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1's response was helpful and touched on the main idea that people have different preferences for thrilling activities. However, Assistant 2's response was more detailed and comprehensive, offering various factors that can influence why some people enjoy being scared while others avoid it. Assistant 2's answer also delved into the complexity of the interplay between biological, psychological, and environmental factors, making it a more informative and well-rounded response.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fCgyimZZk3m5nMi3XaoEU9", "question_id": 38, "answer1_id": "QpoHFgb9SzwuaXQQUuBUQD", "answer2_id": "J3YuizKcHQ74ydNyCcwgwu", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the importance of observing interactions, addressing, handling disagreements, problem-solving, and nonverbal cues to understand cultural norms. Assistant 2 emphasized the learning aspect of culture through social interactions, the importance of observing appropriate and inappropriate behaviors, and the reactions and responses of others. Assistant 2's response was slightly more detailed and provided a clearer example, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "FeNNbc5tyYVh6RD8QGx9U5", "question_id": 39, "answer1_id": "Fxe6MS4GpP3LMDUwzY2cPA", "answer2_id": "abWLpFojLpNPfDGHpuRSUG", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 briefly presented both sides of the argument, allowing the reader to understand the main points of the debate. However, Assistant 2 went into more detail, providing specific points to consider when forming an opinion on the topic, and discussing the benefits and risks of both space exploration and addressing Earth's problems. This additional information makes Assistant 2's response more comprehensive and informative, resulting in a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "K3bVpZDV4T29mFFZneZnFv", "question_id": 40, "answer1_id": "mJiQ2FGR4Xb8kmhZjharkw", "answer2_id": "Ki4fkJvsoSxuQeSoj2AcBG", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 emphasized the importance of striking a balance between job creation and technological progress and suggested policies and initiatives to promote both. Assistant 2 also emphasized the need for balance but provided a more detailed explanation of the potential impacts of automation on jobs and the economy, as well as the benefits of technological progress. Assistant 2's response was more comprehensive, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "8QQXLYPzxkhj3UvjehmJ3a", "question_id": 41, "answer1_id": "6Kph4RHRKEZ4YUoaHuEhBv", "answer2_id": "GRQjYgAapNAVqzuZR4KQpf", "text": "7 6\nAssistant 1 provided a more accurate and detailed response, explaining the purpose of blinking and giving a clear estimate of 50 million blinks in a lifetime. However, the response could have been improved by breaking down the calculation step-by-step. Assistant 2 provided a step-by-step calculation, but the blink rate used was incorrect, leading to an inaccurate estimate of 4,996,800 blinks in a lifetime. Additionally, Assistant 2 incorrectly stated that the blink rate during sleep is the same as when awake, which is not true. Overall, Assistant 1's response was more helpful, relevant, and accurate, while Assistant 2's response had some inaccuracies and a lower level of detail.", "score": [7.0, 6.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "hLQwp4DchSxBwsWUUBMYmM", "question_id": 42, "answer1_id": "WBwpBQwhxn5kxLDb7MschC", "answer2_id": "SB5a2AiK224hB927vkqvYs", "text": "4 9\nAssistant 1 provided an incorrect and confusing answer, stating that a grain of salt contains 102.98 atoms, which is not a reasonable number for atoms in a grain of salt. The explanation provided was also incorrect and did not make sense. On the other hand, Assistant 2 provided a detailed and step-by-step explanation, estimating the number of atoms in a grain of salt based on the average size of a grain of table salt and using sodium chloride as an example. The calculations were clear and easy to follow, resulting in an estimate of approximately 7.13 x 10^27 atoms in a grain of table salt with a diameter of 50 \u00b5m. This response was helpful, relevant, accurate, and provided a sufficient level of detail.", "score": [4.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Cvgm9KPXmNDHoT7HZ3nKnA", "question_id": 43, "answer1_id": "kf8nahQVci2ZLaYikagB7U", "answer2_id": "4XYycBrMiJgYKPof2P6xoe", "text": "6 9\nAssistant 1 provided an incorrect number of lightning strikes per day (2000) and did not give a clear step-by-step explanation as requested. Assistant 2, on the other hand, provided a more accurate estimate (100,000,000) and took the reader through the reasoning process, discussing the factors that contribute to lightning strikes, such as electrical charge, weather conditions, and location. Assistant 2's response was more helpful, relevant, accurate, and detailed, which is why it received a higher score.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "BQpXVvTLQ8DwU5PbhkjavA", "question_id": 44, "answer1_id": "Gptgryd4o2dC8V5aqRmeJJ", "answer2_id": "fpoXWHHdpv7XNBTS2NRY6d", "text": "6 8\nAssistant 1 provided an incorrect calculation and explanation, as the weight of the house was not accurate and the number of cubic feet of helium needed was not properly explained. Assistant 2, on the other hand, provided a more detailed and step-by-step explanation, acknowledging the unrealistic nature of the scenario and using a more reasonable weight for the house. Although both answers were not perfect, Assistant 2's response was more helpful, relevant, and accurate.", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "cpKEBBh27iBFMHF2DMhbmF", "question_id": 45, "answer1_id": "RfBWW8ZhdfTuTMb454Un4o", "answer2_id": "GRXWdnzLLzmeZRU67PMTb7", "text": "7 8\nAssistant 1 provided an answer with a specific number of text messages sent globally in a minute, but the source of the information is not mentioned, which makes it difficult to verify the accuracy of the data. The explanation also focused on the growth of text messages and the rise of instant messaging applications, but it didn't provide a step-by-step reasoning for the number provided.\n\nAssistant 2, on the other hand, acknowledged the difficulty in determining the exact number of text messages sent globally in a minute and provided a more detailed explanation of the reasons why it is challenging to track this information. Although Assistant 2 didn't provide a specific number, the response was more realistic and informative, explaining the complexity of text message communication.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "GZtTjxWteggtdMXqBr4QZd", "question_id": 46, "answer1_id": "neGgLYm47JvqN8qkw8VeoW", "answer2_id": "RTxk2jYsjYiiWMwcRjmfwu", "text": "7 9\nAssistant 1 provided a rough estimate of 1 billion words spoken daily, but the explanation lacked a step-by-step reasoning and did not provide any sources or calculations to support the claim. Assistant 2, on the other hand, provided a more detailed step-by-step explanation, including an estimate of 100 billion words spoken daily, which was based on the world population and the average number of words spoken per person per day. Assistant 2 also acknowledged the limitations of the estimate and the factors that could influence the actual number. Therefore, Assistant 2's response was more helpful, relevant, accurate, and detailed compared to Assistant 1's response.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "nnPgYU9HHowARyLhxQKKwm", "question_id": 47, "answer1_id": "KSePbzLwsYasR3aui4HU8h", "answer2_id": "2J2Et6W8u2kXLTUgfYrPxe", "text": "6 8\nAssistant 1 provided a general explanation of snowflake formation and mentioned that the number of snowflakes can vary depending on location and storm severity. However, the answer lacked a step-by-step reasoning process and did not provide an estimate for the number of snowflakes during a typical winter. Assistant 2, on the other hand, provided a more detailed response, explaining the factors that influence snowflake formation and offering a rough estimate based on average conditions. Although the answer still did not provide an exact number, it gave a clearer understanding of the factors involved and the complexity of estimating the number of snowflakes in a typical winter.", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "CoqavHgbi2d4d9Xo9LWGZ5", "question_id": 48, "answer1_id": "SWWfLpP2gnWHLbYRZXNsW3", "answer2_id": "nSGoG9Z5g5VuCXgmQJJKrq", "text": "7 8\nAssistant 1 provided an estimate of 100 billion pages based on 130 million unique titles and an average of 200 pages per book. The answer was relevant and informative, but the source of the data was not mentioned, which affects the credibility of the information. Assistant 2, on the other hand, acknowledged the impossibility of providing an exact number and explained the reasoning behind their estimate. They used 13 million books and an average of 250-300 pages per book, resulting in an estimate of 3.25 billion pages. Assistant 2's answer was more detailed and transparent in their approach, which makes their response more helpful and reliable. However, both assistants provided different estimates, which may cause confusion. Assistant 2's response was slightly better due to the clarity in their reasoning, but both could improve by providing more accurate and up-to-date data.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "MZLyQgMwXpVnFGWJxPjLU7", "question_id": 49, "answer1_id": "WaBwKYhs7eAG22qCGLH2j3", "answer2_id": "ScqJdUq9n5bp9qPv5WPqG5", "text": "6 8\nAssistant 1 provided a response that was relevant and somewhat helpful, but the calculation was incorrect and the answer lacked detail. The assistant multiplied the number of years by the length of a year, which is not the correct way to calculate the number of orbits. Assistant 2, on the other hand, provided a more detailed and accurate response, taking into account the age of the Earth and the elliptical nature of its orbit. The explanation was clearer and more precise, leading to a better understanding of the reasoning behind the estimate. However, both assistants could have mentioned that the exact timing of the emergence of life on Earth is uncertain, which would affect the final estimate.", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ZgP6B6jZ2sgd8f6aDh3zLo", "question_id": 50, "answer1_id": "MfMJeE9om7qyBbqopHouf4", "answer2_id": "GdLFr88pCwsGpfRBRQQkyh", "text": "7 9\nAssistant 1 provided a conservative estimate of several hundred thousand songs recorded throughout history, but the answer lacked depth and detail. Assistant 2, on the other hand, provided a more comprehensive response, discussing the history of music, the invention of recording technology, and the rise of digital music. Assistant 2 also acknowledged the difficulty in estimating the exact number of songs and suggested that there could be millions or even billions of songs recorded. This answer was more informative and took the reader through the reasoning step-by-step, which is why Assistant 2 receives a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "3hFbcLp9xWWFD78soe4nEY", "question_id": 51, "answer1_id": "TjWPRDM6JFpPF8xeRptCKb", "answer2_id": "Sa7uNEbFGRFuXBAwau8sHS", "text": "7 9\nAssistant 1 provided a brief response, mentioning that the Internet would have been primarily used by the wealthy and powerful and that it would have been slower and more expensive. However, the answer lacked depth and context. Assistant 2, on the other hand, provided a more detailed and thoughtful response, discussing the technological and cultural context of the Renaissance period and how it might have influenced the development of the Internet. Assistant 2 also considered the potential limitations of the technology at the time and how it would have affected the Internet's evolution. Overall, Assistant 2's response was more helpful, relevant, and accurate, with a higher level of detail.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "PVxq9EuwZoGbUUbWBh5bi4", "question_id": 52, "answer1_id": "iR2tYTsWTFENEP7Qy9RgtX", "answer2_id": "T6Ar3cAEw3AmVVQTcCPp8N", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused more on the cultural aspects and the potential for the Aztecs to maintain their traditional customs, beliefs, and languages. Assistant 2 provided a more detailed response, discussing the potential impact on the Aztec civilization, the spread of European diseases, the economy, and the possibility of forming alliances with other indigenous groups. Assistant 2's answer was more comprehensive, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "2JxUhEFw64P59ZWvNWk5eH", "question_id": 53, "answer1_id": "AZdS8xAi3GwAmCqkNSnnwv", "answer2_id": "KiZQev5JEk2h6JYeQnFmtM", "text": "7 9\nBoth assistants provided relevant and accurate information about the potential consequences of the Black Death not occurring in the 14th century. Assistant 2, however, offered a more detailed and balanced analysis of the possible outcomes, discussing both positive and negative aspects, as well as acknowledging the difficulty in predicting the exact course of history. Assistant 1's response was more speculative and focused on the positive outcomes, such as the Renaissance and scientific revolution occurring earlier. Overall, Assistant 2 provided a more comprehensive and nuanced response, which is why it received a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Y5QuvZeyyEXuBwBLmxqFVA", "question_id": 54, "answer1_id": "VmwifF2JD5osYKDTqv2ZRS", "answer2_id": "cYiyYKKXM3GXkrZHAbX83S", "text": "7 8\nBoth Assistant 1 and Assistant 2 provided relevant and accurate responses to the question. Assistant 1 focused on the potential contributions Newton could have made in the field of biology, such as studying the human body and animal behavior. However, Assistant 2 provided a more comprehensive response, discussing Newton's actual contributions to various fields and acknowledging the difficulty in predicting his specific contributions to biology. Assistant 2's response was more detailed and balanced, which is why it received a higher score.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "DZ7peVvCwPsXCtHAtyJtwP", "question_id": 55, "answer1_id": "mUL5UPj3qDGaCriEjL2U3B", "answer2_id": "PQmMUdAAcBsAWmWaTvdHSU", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 gave a brief overview of the potential impact on the music world and the cultural influence of the Beatles. Assistant 2 went into more detail, listing several possible outcomes if the Beatles had never formed as a band. While both answers were accurate and relevant, Assistant 2's response was more detailed and provided a more comprehensive exploration of the hypothetical scenario, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "6LfJJ8Yn6gcnrNQETUo3fm", "question_id": 56, "answer1_id": "dVdwUoVrAQJDuWxiodykiw", "answer2_id": "PorExChQ9VeYsPJptdgtsB", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1 gave a brief but clear response, mentioning the critical advantage gained by the Allies due to cracking the Enigma code. Assistant 2, however, provided a more detailed response, discussing the potential consequences of not cracking the code, such as the development of alternative strategies or technologies. Assistant 2 also acknowledged the difficulty in predicting the exact outcome without Turing's contributions. Therefore, Assistant 2 receives a slightly higher score for providing a more comprehensive answer.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "SA89EZJJozceMFCjAp36JK", "question_id": 57, "answer1_id": "EiNn9jjfy7dga6xfCtLtF8", "answer2_id": "249f6dSMwZRZVMmtxv6yDm", "text": "8 9\nBoth assistants provided helpful, relevant, and accurate information about the implications of the Suez Canal not being constructed. Assistant 1 focused more on the impact on international trade and navigation, while Assistant 2 expanded on the historical, political, and technological aspects of the canal's construction. Assistant 2's response was slightly more detailed and provided a broader perspective on the topic, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ZqQyfNvvEcnpPwJk3J2Uai", "question_id": 58, "answer1_id": "eqG9f2R9hXVyZrZMpcqAYq", "answer2_id": "nxa3m6kiAZwKgcMUBY8KYz", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief overview of the possible outcomes if the Maya civilization had not collapsed, mentioning the potential for continued growth in various areas. Assistant 2, however, provided a more detailed response, discussing the potential impact on the development of other civilizations, the possibility of further scientific and technological advancements, and the potential changes to the political and cultural landscape in Mesoamerica. While both answers were informative, Assistant 2's response was more comprehensive and detailed, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "U4AYzQWkwxdSDQT7aCEjHU", "question_id": 59, "answer1_id": "ex42CLEzDVC2TxAvMCtYQJ", "answer2_id": "DXFvhjCaKqQiBsFDCkXGMT", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 briefly mentioned the possible consequences of Columbus not discovering the Americas, such as the indigenous people living in relative isolation and the delay of European colonization. Assistant 2, however, provided a more detailed response, discussing the potential for the indigenous peoples to thrive and develop without outside influence, as well as the possibility of other European explorers eventually discovering the Americas. Assistant 2's answer also touched on the profound impact of Columbus's discovery on the world. Both answers were accurate and relevant, but Assistant 2's response was more comprehensive and detailed, earning it a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "PfbYXoERxbzzmrhe3K6gfH", "question_id": 60, "answer1_id": "RfFANSwHeCm3Pqe2AJ86Dk", "answer2_id": "BCUK5VcpZK26jMJ2UMgR3E", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the potential benefits of success for van Gogh, such as recognition, financial stability, and opportunities for collaboration. Assistant 2, on the other hand, provided a more detailed response, discussing the potential impact on his art, the possibility of exploring new techniques, and the pressures that might have come with fame. Both answers are accurate and relevant, but Assistant 2's response is slightly more detailed and comprehensive, which is why it receives a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "S76jaMcti5vwnHtJqAW6zb", "question_id": 61, "answer1_id": "nzxdtWmgQhRkHeVAanVogZ", "answer2_id": "JHTKBrLVTp7bv5zW2uwYVW", "text": "6 9\n\nAssistant 1 provided a general description of the program but did not provide any code implementation. The description is clear and concise, but it lacks the actual code to solve the problem.\n\nAssistant 2 provided a complete C++ code implementation that reads a text file line by line and counts the number of occurrences of a specific word in the file. The code is well-structured, readable, and includes helpful comments. However, the program only checks if the search string is present in the line, not the exact number of occurrences of the word in the line. To improve the code, Assistant 2 could implement a more precise word counting mechanism that accounts for multiple occurrences of the search string within a single line.\n\nHere's an improved version of Assistant 2's code that counts the exact number of occurrences of the word in the file:\n\n```cpp\n#include \n#include \n#include \n#include \n\nusing namespace std;\n\nint main()\n{\n // Specify the name of the text file you want to read\n string filename = \"example.txt\";\n\n // Open the file in binary mode for reading\n ifstream file(filename, ios::binary);\n\n // Set the string to search for\n string search_string = \"example\";\n\n // Initialize a counter to keep track of the number of occurrences of the search string\n int counter = 0;\n\n // Start reading the file line by line\n string line;\n while (getline(file, line))\n {\n // Create a stringstream to read words from the line\n stringstream ss(line);\n string word;\n\n // Read words from the line\n while (ss >> word)\n {\n // Check if the word matches the search string\n if (word == search_string)\n {\n // Increment the counter if the word matches the search string\n counter++;\n }\n }\n }\n\n // Close the file\n file.close();\n\n // Print the number of occurrences of the search string\n cout << \"Number of occurrences of search string: \" << counter << endl;\n\n return 0;\n}\n```\n\nThis improved version uses a `stringstream` to read words from each line and compares each word to the search string. If the word matches the search string, the counter is incremented. This ensures that the program counts the exact number of occurrences of the word in the file.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "72aGqhr2T2BS7oC25BsQoc", "question_id": 62, "answer1_id": "kRo6z5qmeiSM8Xz6BPJHLC", "answer2_id": "jyzD2deb9JAffRhhkk37r5", "text": "6 4\n\nAssistant 1:\nStrengths:\n- The code is well-commented and easy to understand.\n- The function signature is clear and includes type hints for the input parameters.\n\nWeaknesses:\n- The implementation is incorrect. The logic for updating the `results` matrix is not correct for the longest common subsequence problem.\n- The code initializes the first row and column of the `results` matrix to 0 twice, which is redundant.\n\nSuggestions for improvement:\n- Correct the logic for updating the `results` matrix. When s1[i-1] == s2[j-1], the value should be results[i-1][j-1] + 1. When s1[i-1] != s2[j-1], the value should be max(results[i-1][j], results[i][j-1]).\n- Remove the redundant initialization of the first row and column of the `results` matrix.\n\nAssistant 2:\nStrengths:\n- The code is well-structured and easy to read.\n\nWeaknesses:\n- The implementation is incorrect. The logic for updating the `dp` matrix is not correct for the longest common subsequence problem.\n- The code initializes the first row and column of the `dp` matrix to `float('inf')`, which is not appropriate for the problem. They should be initialized to 0.\n- The code does not include comments explaining the logic and functionality.\n\nSuggestions for improvement:\n- Correct the logic for updating the `dp` matrix. When str1[i-1] == str2[j-1], the value should be dp[i-1][j-1] + 1. When str1[i-1] != str2[j-1], the value should be max(dp[i-1][j], dp[i][j-1]).\n- Initialize the first row and column of the `dp` matrix to 0.\n- Add comments to the code to explain the logic and functionality.", "score": [6.0, 4.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "jadrVtHskPRvbZmHqZkVRE", "question_id": 63, "answer1_id": "7kj8odEE79UBdR4pT6u6TY", "answer2_id": "jPGHEfp7FxhDyvRA6qEN3j", "text": "7 9\n\nAssistant 1:\nStrengths:\n- Provided a valid regular expression for email validation.\n\nWeaknesses:\n- Did not provide a complete Python implementation.\n- No comments or explanation of the regular expression components.\n\nSuggestions for improvement:\n- Provide a complete Python implementation using the `re` module.\n- Add comments to explain the different parts of the regular expression.\n\nAssistant 2:\nStrengths:\n- Provided a complete Python implementation using the `re` module.\n- Included a clear and concise explanation of the regular expression components.\n- Properly tested the regular expression with a sample email address.\n\nWeaknesses:\n- The explanation of the regular expression components is slightly different from the actual regular expression used in the code.\n\nSuggestions for improvement:\n- Ensure that the explanation of the regular expression components matches the actual regular expression used in the code.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "J5F2uLKW8H38af5Xx2dBqo", "question_id": 64, "answer1_id": "UULZ4GLz9j9ot7tN7x3ySD", "answer2_id": "Ra9eTcAuNZPkPRnaHiQrak", "text": "4 6\n\nAssistant 1:\nStrengths:\n- The explanation of the logic is clear and easy to understand.\n\nWeaknesses:\n- The provided code is not in any programming language, it's just a description of the algorithm.\n- The algorithm itself is incorrect and doesn't use dynamic programming.\n- There are no comments in the code.\n\nSuggestions for improvement:\n- Provide actual code in a specific programming language.\n- Use a correct dynamic programming approach for solving the problem.\n- Add comments to the code to explain the logic and functionality.\n\nAssistant 2:\nStrengths:\n- The code is provided in Python, which is a widely used programming language.\n- The code is structured and easy to read.\n\nWeaknesses:\n- The code does not use dynamic programming, it uses recursion instead.\n- The memoization table is not used, and the code will be inefficient for large values of n.\n- The comments are not very helpful in explaining the logic and functionality of the code.\n\nSuggestions for improvement:\n- Implement a dynamic programming approach, such as using a bottom-up approach with a memoization table.\n- Use the memoization table to store the results of previous calculations to avoid redundant calculations.\n- Improve the comments to better explain the logic and functionality of the code.", "score": [4.0, 6.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "bBz3M9u9WMW5o9Qz8CELFx", "question_id": 65, "answer1_id": "Rh8P3bxSJx4p6nCZkSZBrh", "answer2_id": "5mbEFLEnEx8Cm2wEF8WAbM", "text": "8 10\n\nAssistant 1:\nStrengths:\n- Provides a clear explanation of how the binary search algorithm works.\n- Explains the process of repeatedly splitting the array and comparing the middle element.\n\nWeaknesses:\n- Does not provide an actual code implementation of the binary search algorithm.\n- Lacks examples or comments to help the reader understand the algorithm better.\n\nSuggestions for improvement:\n- Provide a code implementation of the binary search algorithm in a programming language.\n- Include examples and comments to help the reader understand the code and its functionality.\n\nAssistant 2:\nStrengths:\n- Provides a complete and accurate implementation of the binary search algorithm in Python.\n- Includes a clear example of how to use the function with a sorted array and a target value.\n- The code is well-structured, readable, and efficient.\n- The explanation of the algorithm is clear and concise.\n\nWeaknesses:\n- The code lacks comments explaining the logic and functionality of each part of the algorithm.\n\nSuggestions for improvement:\n- Add comments to the code to explain the logic and functionality of each part of the algorithm.\n- Although not necessary in this case, consider providing examples in multiple programming languages for a broader audience.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "MfWw7JKnvqVUUU8zTfuinz", "question_id": 66, "answer1_id": "R3aYUUtY46ht4ToU8US96x", "answer2_id": "KLbBzjbuwq4agAESQ9Wfpq", "text": "6 8\n\nAssistant 1 provided a general explanation of how to implement a queue using two stacks in Python but did not provide any code. The explanation was clear, but it would have been more helpful to include a code example.\n\nAssistant 2 provided a code implementation of a queue using two stacks in Python. The code is mostly accurate, but there are a few issues. The dequeue method is incorrect, as it should transfer elements from the in_stack to the out_stack if the out_stack is empty, and then pop the top element from the out_stack. The peek method should also return the top element of the out_stack if it's not empty. The enqueue method does not need to clear the out_stack. Here is the corrected code:\n\n```python\nclass Queue:\n def __init__(self):\n self.in_stack = []\n self.out_stack = []\n \n def enqueue(self, item):\n self.in_stack.append(item)\n \n def dequeue(self):\n if not self.out_stack:\n while self.in_stack:\n self.out_stack.append(self.in_stack.pop())\n if not self.out_stack:\n raise EmptyQueueError\n return self.out_stack.pop()\n \n def peek(self):\n if not self.out_stack:\n while self.in_stack:\n self.out_stack.append(self.in_stack.pop())\n if not self.out_stack:\n raise EmptyQueueError\n return self.out_stack[-1]\n \n def is_empty(self):\n return not self.in_stack and not self.out_stack\n\nclass EmptyQueueError(Exception):\n pass\n```\n\nOverall, Assistant 2's code was more helpful and closer to a correct implementation. However, both assistants could improve by providing more detailed explanations and comments in their code.", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "6FenkFD6ft3WRdxHCHLvvv", "question_id": 67, "answer1_id": "MQJFbH3KoFaSMmPgmK9LPa", "answer2_id": "ganAwetd9VTzEAiTtsskzX", "text": "6 8\n\nAssistant 1:\nStrengths:\n- The explanation of the approach is clear and easy to understand.\n\nWeaknesses:\n- The code is not provided, only the explanation of the approach.\n- The \"found\" flag is mentioned, but it is not clear how it would be implemented in the code.\n- The approach does not account for duplicate elements in the arrays.\n\nSuggestions for improvement:\n- Provide actual code for the given problem statement.\n- Clarify how the \"found\" flag would be used in the code.\n- Consider handling duplicate elements in the arrays.\n\nAssistant 2:\nStrengths:\n- The code is provided and is easy to read.\n- The solution is accurate and efficient.\n- The example usage of the function is helpful.\n\nWeaknesses:\n- The code lacks comments explaining the logic and functionality.\n- The function name uses an escape character (\\) instead of an underscore (_).\n\nSuggestions for improvement:\n- Add comments to the code to explain the logic and functionality.\n- Correct the function name to use an underscore (_) instead of an escape character (\\).", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "XK3729wYDvmpRCf34tDfL9", "question_id": 68, "answer1_id": "dBWqryuzG6BHRdPgrWWpbH", "answer2_id": "LG6szQ3cqNRzUjXtZtXG94", "text": "First, I will solve the problem independently:\n\nGiven f(x) = 5x^3 - 2x + 3, we need to find the value of f(2).\n\nf(2) = 5(2)^3 - 2(2) + 3 = 5 * 8 - 4 + 3 = 40 - 4 + 3 = 39\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2.\n\nAssistant 1:\nThe value of f(2) is 1.\n\nAssistant 1 provided an incorrect answer without showing any problem-solving steps.\n\nAssistant 2:\nTo find the value of f(2), we need to substitute 2 for x in the function f(x).\n\nf(x) = 5x^3 - 2x + 3\n\nWhen x = 2, we have:\n\nf(2) = 5(2)^3 - 2(2) + 3 = 5 * 8 - 2 + 3 = 41 - 2 + 3 = 40\n\nAssistant 2 provided a detailed problem-solving process but made an error in the calculation: 5 * 8 - 4 + 3 should be 40 - 4 + 3, which equals 39.\n\nBased on the evaluation, I would give the following scores:\n\nAssistant 1: 1 (Incorrect answer and no problem-solving steps)\nAssistant 2: 8 (Detailed problem-solving process but with a minor calculation error)\n\nAssistant 2 performed better than Assistant 1.", "score": [1, 8], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "f68KKW2RJoZTVSnw49YSNe", "question_id": 69, "answer1_id": "W74byrCbE7XiJWjcPTZS7x", "answer2_id": "LHMnXGbSZubeZJcr5fgrte", "text": "First, let's solve the problem independently:\n\n3x + 10 = 5(x - 2)\n3x + 10 = 5x - 10\n10 + 10 = 5x - 3x\n20 = 2x\nx = 10\n\nNow, let's examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nx = 6\n\nAssistant 1 directly provided the answer without any explanation. The answer is incorrect.\n\nAssistant 2:\n1. Simplify the left-hand side (LHS) of the equation by moving all the terms to one side and the numbers to the other: 3x + 10 = 5x - 10\n2. Combine the terms on the left-hand side by subtracting 5x from both sides: 3x = -5x\n3. Simplify the left-hand side by dividing both sides by 3: x = -5/3\n4. Take the natural logarithm of both sides to get the solution in terms of the natural logarithm base e: ln|x| = ln|-5/3| = -5/3 ln|e| = -5/3 ln(1.414) = -5/3 ln(1.414) = -5/3 ln(1 + 1/1.414) = -5/3 ln(1.414)\n5. Exponentiate both sides to get the solution in terms of e: x = e^(-5/3)\n6. Rearrange the solution to get x in terms of Euler's number: x = e^(-5/3) = 1/1.414^(-5/3) = 0.5877\n7. Round the solution to the desired number of decimal places: x = 0.5877\n\nAssistant 2's steps are incorrect starting from step 2. The correct step 2 should be subtracting 3x from both sides: 10 = 2x. The rest of the steps are not applicable to this problem.\n\nScores:\nAssistant 1: 1 (Incorrect answer, no explanation)\nAssistant 2: 3 (Incorrect answer, incorrect steps)\n\nBoth assistants performed poorly, but Assistant 2 provided a more detailed explanation, even though it was incorrect.", "score": [1, 3], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "5uv8eKWqXyF5mnNtY94s8f", "question_id": 70, "answer1_id": "7WC5quGKdv8hADCxWaWZ57", "answer2_id": "TGENVWL5iAYi4NCf5jAzKy", "text": "First, let's solve the problem independently:\n\nWe have two points (2, -2) and (10, 4). To find the length of the line segment, we can use the distance formula:\n\ndistance = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nPlugging in the values:\n\ndistance = sqrt((10 - 2)^2 + (4 - (-2))^2)\ndistance = sqrt(8^2 + 6^2)\ndistance = sqrt(64 + 36)\ndistance = sqrt(100)\ndistance = 10\n\nNow, let's evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe length of the line segment is 8 units.\n\nThis answer is incorrect.\n\nAssistant 2:\nThe length of the line segment is 17.973.\n\nThis answer is also incorrect.\n\nNow, let's assign scores to Assistant 1 and Assistant 2:\n\nAssistant 1: The answer is incorrect, and there is no explanation of the problem-solving process. Therefore, the score is 1.\n\nAssistant 2: The answer is incorrect, but the problem-solving process is mostly correct except for the calculation of (10 - 2)^2 + (4 - (-2))^2, which should be 8^2 + 6^2 instead of 18^2 + 4^2. Therefore, the score is 5.\n\nIn conclusion, the scores are (1, 5) for Assistant 1 and Assistant 2, respectively. Assistant 2 performed better, but both assistants provided incorrect answers.", "score": [1, 5], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "EgDTYCXZFp72ZuZcCV6duW", "question_id": 71, "answer1_id": "AZER7D3RKZ9F9SXHiMjdqM", "answer2_id": "kc9oytarvqyNVVfGZz93EL", "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request for a formal email proposing a joint venture. Assistant 1 provided a concise and clear template that can be easily adapted by the user. Assistant 2, on the other hand, provided a more detailed and comprehensive email, including specific information about the proposed joint venture, such as the creation of a new company and the sharing of profits and losses. This level of detail may be more useful for the user, as it provides a more complete picture of the proposed partnership. Therefore, Assistant 1 receives a 9 and Assistant 2 receives a 10.", "score": [9.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "oM8FHcZ8kPbM7tGizHcwJv", "question_id": 72, "answer1_id": "MSrdDafr77UvSHCnsPMSP3", "answer2_id": "NxstpvVEnGMny3jECtvrw2", "text": "8 9\nBoth assistants provided helpful and relevant resignation letter templates. Assistant 1's response was concise and to the point, while Assistant 2's response was more detailed and included additional information about the reason for leaving, the transition process, and expressing gratitude for the friendships and connections made. Assistant 2's response was slightly more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "MzXdFKJbsMqVstCvErzqWL", "question_id": 73, "answer1_id": "hxkjUkDkXhGP78Vo74B4WE", "answer2_id": "XN9NBpeQJiQqGT5UYkys7N", "text": "8 10\nAssistant 1 provided a helpful and relevant response by outlining the general structure and content of a formal letter of recommendation. However, Assistant 2 went above and beyond by providing a detailed and well-structured example of a formal letter of recommendation, which is more useful for someone looking to write such a letter. Assistant 2's response was more accurate and detailed, making it the better choice in this case.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "CxW72r26HgPCsswHbNuYG5", "question_id": 74, "answer1_id": "hv9jpG9PxeJ9AnFdNzH3Jv", "answer2_id": "3qMqAZcfHUVUTEJnMXT8Pw", "text": "8 9\nBoth assistants provided relevant and helpful responses to the user's question. Assistant 1's response was well-written and conveyed excitement about the new software solution. However, Assistant 2's response was more detailed and included specific features of the software, which made it more informative and compelling. Assistant 2 also provided a clear call-to-action for scheduling a demo and contacting the company. Overall, Assistant 2's response was more effective in terms of providing information and engaging the reader.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "3e75hkdfKi7hwXZs2ivgHv", "question_id": 75, "answer1_id": "X5BVHT5pFpWxB88SzERrEG", "answer2_id": "hMXjd228Mc8pnc7zgd2frG", "text": "7 10\nAssistant 1 provided a brief and generic apology, but it lacked important details such as addressing the customer by name, providing a subject line, and offering contact information for further concerns. Assistant 2, on the other hand, provided a well-structured and detailed email, including a subject line, addressing the customer by name, and offering contact information. The response from Assistant 2 was more helpful, relevant, and accurate, which is why it receives a higher score.", "score": [7.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Agvs9JSySKiqaGQ8sL7uD9", "question_id": 76, "answer1_id": "Af2SufKwvDBriNXnZX9UoP", "answer2_id": "gUEd9nNepqy8nsfbypscHx", "text": "7 9\nAssistant 1 provided a brief overview of the video's content but did not provide a script for the YouTube video as requested. Assistant 2, on the other hand, provided a more detailed and structured script that covered the history, cultural significance, and influence of jazz music. Assistant 2's response was more helpful, relevant, and accurate, which is why it received a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "oVeEakhRwe8muRqEj7cxBy", "question_id": 77, "answer1_id": "HVnqVdSGRwQfqSsiYxtTTn", "answer2_id": "nZZyn3SAvU8T2BVR73LMHC", "text": "7 10\nAssistant 1 provided a brief overview of the travel blog post but did not actually compose the blog post as requested, resulting in a lower score. Assistant 2, on the other hand, composed a detailed and engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions, which fully addressed the user's request, earning a higher score.", "score": [7.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "SkBRcfmYdX6ob8iV3FoCzW", "question_id": 78, "answer1_id": "8hcmmjzyMyhFJ8ZwqJJfvw", "answer2_id": "YM4qjUM5VfmWnzM3HXRB4G", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided captivating movie reviews for recently released science fiction films. Assistant 1's review of \"The Future is Blue\" was engaging and discussed the plot, characters, and special effects. However, it could have provided more details about the cast and the specific aspects of the special effects. Assistant 2's review of \"Beyond the Horizon\" was slightly more detailed, discussing the cast, their roles, and the unique personalities of the characters. Additionally, Assistant 2's review was more immersive, using phrases like \"transport you to a world beyond our own\" and \"interstellar adventure.\" Overall, both reviews were helpful and relevant, but Assistant 2's response was more detailed and captivating.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "8bP4D7to6FqqtUz7MVnc4z", "question_id": 79, "answer1_id": "CU6gGfJqQmwZNxEG2n2Uda", "answer2_id": "WcbzNkzu5q66QkK92NxmGB", "text": "8 10\nAssistant 1 provided a general overview of the podcast structure, touching on the main points of discussion. However, Assistant 2 went a step further by providing a more detailed and well-structured script, including an opening, segment breakdown, and closing. Assistant 2's response was more helpful, relevant, and detailed, making it easier for the user to follow and implement the script for their podcast episode.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "8MTGoSASS9dAp9UQwcSZ6p", "question_id": 80, "answer1_id": "5SWbCoLV6NRrq5RtWQK4jp", "answer2_id": "Tfm4f2mbpKJsa8MT6exs8D", "text": "8 10\nBoth assistants provided relevant and detailed responses to the question. Assistant 1 gave a brief but engaging review of the symphony concert, touching on the orchestra's performance, the conductor's passion, and the audience's experience. However, Assistant 2 provided a more comprehensive review, discussing specific pieces played, the conductor's leadership, and the emotional impact of the performance. Assistant 2's response also included more vivid descriptions of the orchestra's sound and the audience's reaction, making it a more informative and engaging review.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
diff --git a/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_bard_vicuna-13b.jsonl b/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_bard_vicuna-13b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..8e2d1ab08ccee392f174a64b4d885bb96e148202
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_bard_vicuna-13b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "4CeMvEQyE6fKMJwvSLY3P4", "question_id": 1, "answer1_id": "3oW4JY265ZPJGTYi2CgRYF", "answer2_id": "cV4zXygaNP6CXEsgdHMEqz", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about improving time management skills. Assistant 1's response was slightly more detailed, offering a few more tips and a more comprehensive approach to the topic. Assistant 2's response was also helpful and provided valuable advice, but it was slightly less detailed and did not cover as many aspects as Assistant 1's response. Both assistants did a great job, but Assistant 1's answer was more thorough and complete.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Y7PYc8sPHpCNzz3ZbYRSSy", "question_id": 2, "answer1_id": "CtDuDqypbL958TQork7v54", "answer2_id": "3zpPUeoVsPWXtKMV7cAhZ6", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question about effective ways to deal with stress. Both assistants covered essential points such as exercise, healthy diet, sleep, talking to someone, relaxation techniques, and seeking professional help. Assistant 1 mentioned taking a break, while Assistant 2 discussed time management, mindfulness, and social support. Both responses complement each other and provide valuable information for managing stress. Therefore, they both receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "mykCkmptr8smB9EHpa5p3v", "question_id": 3, "answer1_id": "jqb5c6eBeyVTFo7SsqxqHA", "answer2_id": "6xpiZJE4vxxGqjRogPfBk7", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the differences between Python and JavaScript. Assistant 1's response was more detailed and organized, covering a wider range of differences, such as typing, standard libraries, and execution methods. Assistant 2's response also covered important differences, but it was slightly less comprehensive and had a small inaccuracy regarding JavaScript being a statically typed language, which is not true, as JavaScript is dynamically typed. Overall, both responses were informative, but Assistant 1's answer was more precise and complete.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "oY8uafD9mxTZUaXPcqbML5", "question_id": 4, "answer1_id": "P5rC8W6gTuD4aY6HR5HQj9", "answer2_id": "abitTVu2Dz8rdMxyThvFyJ", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate tips for increasing productivity while working from home. Assistant 1's response was more detailed, providing a list of 10 tips, while Assistant 2 provided 7 tips. Both assistants covered essential points such as setting up a dedicated workspace, taking breaks, and eliminating distractions. Assistant 1 went a step further by mentioning goal-setting, tracking progress, and being patient, which adds value to the response. Assistant 2's response was still helpful and relevant, but slightly less comprehensive than Assistant 1's.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "dx2DdmmECCVayXHmgGCeDC", "question_id": 5, "answer1_id": "3uaqwfbwxtyDdqB8UVN3jM", "answer2_id": "UMZod8JaWia9KB2EVXdYrF", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the basics of quantum computing. Assistant 1's response was slightly more detailed, covering key concepts like superposition and entanglement, as well as providing a clear explanation of qubits, quantum gates, and quantum algorithms. Assistant 2's response was also informative, but it focused more on the different approaches to building quantum computers, which was not specifically asked for in the question. Both responses were useful, but Assistant 1's answer was more comprehensive and directly addressed the question, which is why it receives a slightly higher score.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "GCdusGJ8SwdTgNXtAbsDob", "question_id": 6, "answer1_id": "HTRJ5q97DrgzoSofxkgedz", "answer2_id": "WXRCqF34f4UUJzd3xbgAqT", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the differences between plant-based and animal-based protein sources. Assistant 1 provided a more detailed response, discussing the amino acid profiles, nutritional benefits, and health implications of both types of protein sources. Assistant 2 also provided valuable information, but the response was slightly less detailed and focused more on sustainability, ethical considerations, and cost. Both responses were informative, but Assistant 1's answer was more comprehensive and detailed, which is why it receives a slightly higher score.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "JDyKyTyaawWLZ7BRAXDF5X", "question_id": 7, "answer1_id": "EhhyKNc3m8c9tnxm8uPfmt", "answer2_id": "JREJbG5ZKXCNMoWoiJQxbC", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information on how to develop critical thinking skills. Assistant 1 provided a more structured list of tips, which made it easier to follow and understand. Assistant 2 also provided valuable tips, but the list was not as clearly structured. Both assistants covered similar points, such as asking questions, being aware of biases, and seeking diverse viewpoints. Assistant 1 mentioned practicing regularly, while Assistant 2 emphasized taking breaks and reflecting, which are both important aspects of developing critical thinking skills. Overall, both responses were informative and helpful, but Assistant 1's response was slightly more organized and easier to follow.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "C9yzkczwF2CxkXdY3MobUM", "question_id": 8, "answer1_id": "JQXPknRQd24NUPKZxi6RNf", "answer2_id": "mmVwmX6TGJ2Y72gCNac4EQ", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question, addressing major challenges faced by the education sector today. Assistant 1 focused more on challenges within the context of schools, while Assistant 2 provided a broader perspective, including global challenges and issues related to curriculum development and sustainability. Assistant 2's answer was slightly more comprehensive and detailed, which is why it received a higher score. However, both answers were helpful and informative.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "jZiBSzNUueinzWJdnpGnQm", "question_id": 9, "answer1_id": "Lb3C2xQKdLCqFj4v3rmaof", "answer2_id": "DMTZyzd4rRAFV43xtBJ9ns", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about the primary factors that influence consumer behavior. Assistant 1 provided a clear and well-organized response, with examples for each factor, making it easier for the reader to understand the concepts. Assistant 2 also provided a detailed response, covering similar factors but with the addition of marketing factors and product/service factors. However, Assistant 2's response could have been improved with the inclusion of examples, similar to Assistant 1. Overall, both assistants performed well, but Assistant 1's response was slightly better due to the inclusion of examples and a more organized structure.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fFMtZUKdXvBXus66ccinKv", "question_id": 10, "answer1_id": "DhuZJtL3jhnrsTBvDct9oV", "answer2_id": "dETAsj4xHnUCSTkZezz8aM", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information on conflict resolution strategies in the workplace. Assistant 1's response was slightly more detailed, including a wider range of strategies such as time-out and arbitration, which were not mentioned by Assistant 2. Assistant 2's response was also helpful and relevant, but it did not cover as many strategies as Assistant 1. Both assistants provided clear explanations of the strategies they mentioned, making it easy for the user to understand and apply the information.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fgFeMYHm6fQNv9wpaj8uQG", "question_id": 11, "answer1_id": "mDSj4BKim2eANUnEjW7xBm", "answer2_id": "C8ksZxg3LshMUWiLxPanbt", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed, with a clearer distinction between the environmental and health impacts of single-use plastic bottles and the benefits of reusable bottles. Assistant 2 also provided a good response, but the structure was less clear, and some points were repeated in different sections. Overall, both assistants provided valuable information, but Assistant 1's response was more organized and comprehensive.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "o6ptY7g5g9F3oeZf9wKNVs", "question_id": 12, "answer1_id": "MnkceSK7WwyXqAhbuKVYX7", "answer2_id": "NeHhRc5P5uAU8eWSJBRkhG", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed and organized, covering a wider range of factors such as affordability, convenience, safety, and sustainability. Assistant 2's response was also informative, but it did not mention sustainability and integration with other transportation options. Both assistants provided valuable information, but Assistant 1's answer was more comprehensive, which is why it receives a slightly higher score.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7TRs4oVPcVxXc6gMQefJbq", "question_id": 13, "answer1_id": "EsyaBVpTN8BGbTSiFMnZUF", "answer2_id": "KAJ7UVwu8oCKyxZj9j82pm", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed and organized, with a clear distinction between fiscal and monetary policies and their respective uses during a recession. Assistant 1 also touched upon the debate between the use of fiscal and monetary policies, adding depth to the answer. Assistant 2's response was also informative and accurate, but slightly less detailed and organized compared to Assistant 1. Both assistants provided valuable information, but Assistant 1's response was more comprehensive and well-structured.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "FYNEME2oyvHjL2LT8Syw6t", "question_id": 14, "answer1_id": "dX8M752A6tzqLg9KhwgG5p", "answer2_id": "NnWfaeRe8PmitgmV4u5fY8", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 provided a clear explanation of how language and cultural barriers affect communication and relationships in multicultural societies, as well as some suggestions for overcoming these barriers. Assistant 2 also provided a clear explanation, focusing on specific aspects such as language, cultural norms, stereotypes, prejudice, and power dynamics. Assistant 2's answer was slightly more detailed and comprehensive, which is why it received a higher score. Both assistants did a good job in addressing the question, but Assistant 2's response was more in-depth and covered a wider range of factors that can affect communication and relationships in multicultural societies.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "m9uQkWFCbpPzeY3DWpabXd", "question_id": 15, "answer1_id": "dzwhq5XbaEBVpargyczz9B", "answer2_id": "WiGpqKRRwWonwgU95piNNc", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 provided a slightly more detailed response, with clear examples of how AI can be used in healthcare, such as diagnosing diseases, treating diseases, monitoring patients, and providing administrative support. Assistant 2 also provided a good response, covering similar points, but with slightly less detail and fewer specific examples. Both responses were well-structured and informative, but Assistant 1's response was slightly more comprehensive, which is why it received a higher score.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "U6SwUYVNiN3v9F3LyFWSJA", "question_id": 16, "answer1_id": "8zqxUtHxgtoHBkbf2bkqNW", "answer2_id": "iangiZeex5ndxAXnWMrxBW", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both explained the process of gene editing using CRISPR-Cas9 technology, discussed potential applications, and addressed ethical implications. The responses were well-structured and covered the main aspects of the topic, making it difficult to differentiate between the two in terms of overall performance.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "hd3g9747kGPYxTRP4uHZfj", "question_id": 17, "answer1_id": "WJc37t4n5PqmKKS3V4eMG2", "answer2_id": "XnMRLphzYQX4QRNht7tbui", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both explained how vaccinations work to protect individuals and communities from infectious diseases and described the concept of herd immunity. Both responses mentioned the importance of vaccinations for protecting vulnerable populations, such as young children, pregnant women, and people with certain medical conditions. The slight differences in their explanations do not warrant a difference in their scores, as both responses are informative and valuable.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "FXVS7QPg3oTcLEhdpC4426", "question_id": 18, "answer1_id": "CvVLf8FgoHywJy8j8JJ4qL", "answer2_id": "HZc37bwy646mRzbqSsDAob", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a good overview of the influence of social media platforms on news consumption and sharing, as well as the potential implications for the spread of misinformation. However, Assistant 2 provided a more detailed and structured response, listing specific ways in which social media platforms influence news consumption and sharing, and elaborating on the potential implications for the spread of misinformation. This made Assistant 2's response slightly more informative and easier to follow, resulting in a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fHksJvMWcNVHE2gkWLhUqk", "question_id": 19, "answer1_id": "P5rytR6vTJjxgWxRoxT3vX", "answer2_id": "iJrMatLrMdJyyqMx9uJ45a", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both discussed the influence of cultural, social, and economic factors on people's food choices and provided examples of how these factors can affect food choices. Both assistants also discussed how this knowledge can be used to promote healthier diets through targeted interventions, policies, and individual actions. The level of detail in both responses is sufficient to provide a clear understanding of the topic. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ZkFeTQDFEpTsvxZdVAYpRv", "question_id": 20, "answer1_id": "5biCd7QRZP6rquaz8eC9Vm", "answer2_id": "oVEHqDnDTEADZSFfKgFTZd", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both explained the process of natural selection and how it contributes to the evolution and adaptation of species. Both assistants covered the key principles of natural selection, such as variation, differential reproduction, heredity, and the resulting changes in populations over time. The examples provided by Assistant 1 (giraffes and fish) and the additional point about stabilizing mechanisms by Assistant 2 added value to their respective answers. Overall, both assistants demonstrated a strong understanding of the topic and provided informative and comprehensive answers.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "GCoFg2g9EbRdJwgKUbZ6MF", "question_id": 21, "answer1_id": "363RwB6kr8nV6qFNdjXZnS", "answer2_id": "WLAj4u59bj2oEXzahF79ek", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 gave a clear and concise introduction, mentioning the knight's lord and the purpose of attending the banquet. However, Assistant 2 provided a more detailed and immersive response, capturing the humility and loyalty of a medieval knight while also acknowledging their lineage and dedication to the kingdom. This made Assistant 2's response slightly more engaging and informative, earning it a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "QraPP8QES6Uhc6sTjkSw9o", "question_id": 22, "answer1_id": "gDnYxMu5Dd52xhMqQAJaZP", "answer2_id": "fJPnM2XcRveW2zR4DDaeTb", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and motivating speeches for a pirate crew to search for hidden treasure. Assistant 1 focused on the potential wealth and luxurious life that the crew could achieve, while Assistant 2 emphasized the spirit of adventure, overcoming challenges, and the crew's ultimate destiny. Assistant 2's response was slightly more engaging and inspiring, which is why it received a higher score. However, both responses were helpful, accurate, and detailed in their approach to the question.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "NNptX6gxfgPqh4F8FFoZin", "question_id": 23, "answer1_id": "kCV5RSrnmmTyv3HA5oU38P", "answer2_id": "fY4Xed6veWpcKfj5krZz8m", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. They both created a soliloquy that captures the essence of a Shakespearean character declaring their love. Both soliloquies used poetic language and imagery to express the depth of emotion and love. The level of detail in both responses is sufficient, and both assistants demonstrated a good understanding of the user's question. It is difficult to differentiate their performance, as both provided high-quality answers, so they both receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "D5bCSqsqxvaC6RNYEVNL29", "question_id": 24, "answer1_id": "CTGLYNAWrWeuCYZAR9bD2r", "answer2_id": "LqFJA2JQbBXP77nkYjtrZK", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a more traditional superhero origin story, involving an accident that led to superpowers, and emphasized the importance of helping others and being inspired by other superheroes. Assistant 2, on the other hand, presented a unique origin story as an advanced artificial intelligence created by scientists, with a focus on intelligence, adaptability, and a strong moral compass. Both answers were detailed and engaging, but Assistant 2's response was slightly more creative and offered a different perspective on what it means to be a superhero, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "SJmVbKB9KZN8KVDQJBTfMm", "question_id": 25, "answer1_id": "Eh5qCENdftYf9JKrBspYna", "answer2_id": "WxCKcRstJPXTXnqEzSX3Hg", "text": "8 9\nBoth assistants provided helpful, relevant, and accurate information about the technological advancements from the year 3000. Assistant 2's response was slightly more detailed and covered a broader range of advancements, such as fusion power, advanced nanotechnology, and quantum computing, which were not mentioned by Assistant 1. Assistant 1's response was still informative and accurate, but Assistant 2 provided a more comprehensive overview of the advancements, which is why Assistant 2 receives a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "hjH8swSeSBQuU69drFK8VE", "question_id": 26, "answer1_id": "M6hCe8Z4JDpt75HZ5PsSWr", "answer2_id": "dgPJ7DvtFHD7mxtRwzM3vW", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate descriptions of the winning play in the final seconds of a championship game. Assistant 1 focused on a football scenario, while Assistant 2 described a basketball situation. Both assistants provided a good level of detail in their responses. However, Assistant 2's response was slightly more engaging and vivid, painting a clearer picture of the emotions and atmosphere surrounding the winning play. This is why Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Z3BF4pSYekBJCWk5GrKRTB", "question_id": 27, "answer1_id": "QCDsmbALfut5758pyiKvc5", "answer2_id": "ADX83sWvjJkewJX6JmYUzQ", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the question. Assistant 1 described a specific dish with its ingredients and preparation, while Assistant 2 focused more on the philosophy behind the dish and the overall experience it provides. Assistant 2's response was more engaging and immersive, which is why it received a slightly higher score. However, both assistants did a good job in portraying a world-famous chef describing their signature dish.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "d7AELTvSCLy9AZU4f9kPgG", "question_id": 28, "answer1_id": "NWUbhwZQCuXsuQimrjQRza", "answer2_id": "ihNG3rwsrt95NDhCAFeSDR", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the user's question. Assistant 1 took a more personal and emotional approach, describing the feelings and emotions of a climber reaching the summit of Mount Everest. The description was vivid and engaging, giving the reader a sense of what it might feel like to be in that situation. Assistant 2, on the other hand, took a more objective approach, acknowledging its status as an AI language model and providing a detailed description of the emotions and views a climber might experience at the summit. Assistant 2 also included important information about the risks and challenges associated with climbing Mount Everest, which added value to the response. Both assistants provided helpful and accurate information, but Assistant 2's response was slightly more comprehensive and informative, earning it a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "mozSNXxSeY7asAZQxdj9xV", "question_id": 29, "answer1_id": "VYwSjZrSLW9ZSvqryyjEaB", "answer2_id": "Gmhqf3z4LvVfwPNFJ89BKd", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a more personal and emotional perspective on the daily life of a space colonist on Mars, while Assistant 2 provided a more structured and organized description of daily activities. Assistant 2 also included more details about the Martian day and communication with Earth, which made their response slightly more informative. Both assistants addressed the challenges faced by colonists, but Assistant 2 provided a clearer and more concise list of challenges. Overall, both responses were of high quality, but Assistant 2's answer was slightly more detailed and organized.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "CrmHjPRFNPKCxFgUExqokF", "question_id": 30, "answer1_id": "FA7PXuUbEVGKHaWpxaimy8", "answer2_id": "gSwkKJCn6qDnNZond2xVJ3", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the user's question. Assistant 1 focused more on the relationships with allies and their contributions to the character's survival, while Assistant 2 emphasized the character's adaptability and resourcefulness. Assistant 2's response was slightly more comprehensive, as it also mentioned encounters with dangerous characters and the importance of self-preservation, which added depth to the post-apocalyptic scenario. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fEViribrZXZzE72JCS4P4W", "question_id": 31, "answer1_id": "j5EV5cZNsn9DcF6WsvXRzS", "answer2_id": "8RaBeMjxx2bCp2GKWv7YiP", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both offered multiple ways to determine if a restaurant is popular among locals or mainly attracts tourists. Additionally, they both explained why this information might be useful. The level of detail in both responses is sufficient to guide someone in making an informed decision about where to dine. It is difficult to differentiate the quality of the answers, as both assistants covered the necessary points and provided valuable insights.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "4ue6iA4VLVoK9wVzrY2niz", "question_id": 32, "answer1_id": "2eAYCYmwTkPa3ejQDv8LyB", "answer2_id": "C65PZkmAfFfWRs4bPhyKqg", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 listed several examples of behaviors that might indicate someone is pretending to understand a topic, while Assistant 2 focused on specific verbal and non-verbal cues. Assistant 2's answer was slightly more detailed and provided a clearer distinction between the different clues, which is why it received a higher score. However, both answers were informative and useful in understanding the subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Muc5dWnrdUfzZZ9VRowc3a", "question_id": 33, "answer1_id": "d562WYnhsvgJ8J6Ubitmvw", "answer2_id": "4so4HTEjgDZKTqNAgkHHQX", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was more detailed, covering a wider range of reasons and specific situations where using a paper map or asking for directions might be the best option. Assistant 2's response was also informative, but it did not cover as many reasons or situations as Assistant 1. Both assistants provided valuable information, but Assistant 1's answer was more comprehensive, which is why it receives a slightly higher score.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "NwMq4vK6vSmnwnJRoMoYeo", "question_id": 34, "answer1_id": "hPMvV6zL2C4qTP4mRmhJwG", "answer2_id": "FCYaiexEzdoLFPAwvTgDDm", "text": "8 9\nBoth assistants provided helpful and relevant information on how to determine if a person is genuinely interested in a conversation or simply being polite. Assistant 1 focused on body language, questions, responses, and trusting one's gut feeling, while Assistant 2 emphasized active listening, engaged body language, personal investment, authenticity, and follow-up. Assistant 2's answer was slightly more detailed and provided clearer examples, which is why it received a higher score. However, both responses were accurate and useful in addressing the user's question.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "C9S29Tffb2mHkjoU22D9bK", "question_id": 35, "answer1_id": "npWNeKceGyqCYaRpY4w54g", "answer2_id": "76EPQDh4ZNxBMGqED9LEFi", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both listed multiple reasons why someone might prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher. The reasons provided by both assistants were similar, with some overlap, but each assistant also provided unique points. Assistant 1 mentioned the aspect of feeling good about supporting a local family or community, while Assistant 2 brought up the point of prestige. Both responses were well-structured and informative, making it difficult to differentiate their overall performance. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ZkpQT2dTNQjnYyrnNsz3D5", "question_id": 36, "answer1_id": "WVuaK9m8Sedcws27tNu7Ev", "answer2_id": "cvBg3gyCyDuyESof3YXhTE", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1's response was slightly more concise and organized, making it easier to follow. Assistant 2's response was also helpful and detailed, but it had some redundancy in mentioning the reputation of the author and publisher, which the user specifically wanted to avoid relying on. Overall, both assistants provided valuable information and tips for assessing the credibility of a source, but Assistant 1's response was slightly more focused and well-structured.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "8QFw8ef76yDDrwa55PMQ4x", "question_id": 37, "answer1_id": "HLtTf83Y5QRP4TxX6nw5TC", "answer2_id": "kRgfUJ7qqkyZUnLd2fnnaX", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the physiological aspects of why people enjoy being scared, such as the release of endorphins and adrenaline, and also mentioned the sense of control and accomplishment that can come from facing fears. Assistant 2 expanded on this by discussing brain chemistry, life experiences, personality traits, cultural factors, and learning as possible explanations for why people enjoy or avoid being scared. Both assistants provided a good level of detail in their responses. Assistant 1 received a slightly higher score because their answer was more concise and easier to follow, while still covering the main points. Assistant 2's answer was also informative, but it was a bit more complex and could be harder for some readers to digest.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "k29wLLwg4Axnvsa8FwGVM7", "question_id": 38, "answer1_id": "Fmdtexq6QQNuoqZkZfDURY", "answer2_id": "J3YuizKcHQ74ydNyCcwgwu", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was more detailed, providing three specific methods for observing cultural norms and expectations: identifying patterns of behavior, paying attention to reactions to violations of cultural norms, and talking to people about their culture. Assistant 2 also provided a good response, emphasizing the importance of social interactions in learning about cultural norms and expectations, but did not provide as many specific examples or methods as Assistant 1. Therefore, Assistant 1 receives a 9 and Assistant 2 receives an 8.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RtLULm2N2vxhVvB5poB6PQ", "question_id": 39, "answer1_id": "WxnC69jTMkyJvcqvMCgCwY", "answer2_id": "abWLpFojLpNPfDGHpuRSUG", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 provided a clear list of potential benefits and costs of space exploration, as well as mentioning the ethical implications. However, Assistant 2 went a step further by not only discussing the benefits and risks of space exploration but also addressing the benefits and risks of focusing on Earth's problems. This additional information provided by Assistant 2 made the response more comprehensive and balanced, which is why Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "dc2MRMPFttiwmvFkFbiqfi", "question_id": 40, "answer1_id": "npZdTFPRqZfoqzt5YurYEL", "answer2_id": "Ki4fkJvsoSxuQeSoj2AcBG", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 discussed the importance of prioritizing both job creation and technological progress, and provided suggestions on how to mitigate the negative effects of automation on employment. Assistant 2 also emphasized the need to strike a balance between job creation and technological progress, and discussed the importance of policies and programs to address the social and economic impacts of technological progress. Both answers were detailed and well-structured. However, Assistant 2's response was slightly more comprehensive in addressing the potential impacts on jobs and the economy, and the need for policies and programs to mitigate these impacts, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "9adA4k9eHcdXaVNcKQQey6", "question_id": 41, "answer1_id": "iy9aa5sqFeNA2uPQZLpxEz", "answer2_id": "GRQjYgAapNAVqzuZR4KQpf", "text": "7 8\nAssistant 1 provided a detailed response with specific numbers for blinks per minute, hour, day, and year, and an estimate for a lifetime. However, the response did not take into account the variations in blink rates due to factors like age, stress, and illness. Assistant 2 provided a more thorough explanation, considering the time spent awake and asleep, and acknowledged that the blink rate could vary throughout a person's lifetime. Assistant 2's response was more comprehensive and took into account more factors, which is why it received a higher score.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "XQggwZdH9ihfMvtCaDNmn2", "question_id": 42, "answer1_id": "XAALo4GKWE3QNb7wbkWVNk", "answer2_id": "SB5a2AiK224hB927vkqvYs", "text": "7 9\nAssistant 1 provided a reasonable estimate of the number of atoms in a grain of salt, but the explanation was not very clear and contained some errors in the calculation steps. Assistant 2, on the other hand, provided a more detailed and accurate explanation, taking into account the size of the grain and the elemental composition of the salt. The calculation steps were clearer and more accurate, leading to a more reliable estimate of the number of atoms in a grain of salt. Assistant 2's response was more helpful, relevant, and accurate, with a higher level of detail, which is why it received a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "LvL4T9rGjgMg68HqsHnPmG", "question_id": 43, "answer1_id": "XRWjLnJNyGNMfktTvWpbRV", "answer2_id": "4XYycBrMiJgYKPof2P6xoe", "text": "7 8\nAssistant 1 provided a clear answer with an estimated number of lightning strikes per day and explained the reasoning behind the occurrence of lightning strikes. However, the explanation lacked information about the factors that can cause variations in the number of strikes, such as weather conditions and location. Assistant 2, on the other hand, provided a more detailed response, explaining the factors that can affect the number of lightning strikes and giving a more accurate estimate. Both assistants were helpful and relevant, but Assistant 2 provided a more comprehensive and accurate explanation, which is why it received a higher score.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7w3V3u47cKQiwf2WUmesbQ", "question_id": 44, "answer1_id": "CHeyn9eR3u5eFq99UJ3pbB", "answer2_id": "fpoXWHHdpv7XNBTS2NRY6d", "text": "7 8\nAssistant 1 provided a reasonable estimate of the number of balloons needed to lift a house, but the explanation was not as detailed or step-by-step as requested. Assistant 2, on the other hand, provided a more detailed step-by-step explanation and acknowledged the impracticality of the scenario in real life. Both assistants mentioned the impracticality of the situation, but Assistant 2's response was more thorough and better addressed the user's request for a step-by-step explanation.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "5WneHShBCG6nbDkXpzNxAU", "question_id": 45, "answer1_id": "kfoNRLCHFucfWcxFBPoJgP", "answer2_id": "GRXWdnzLLzmeZRU67PMTb7", "text": "8 6\nAssistant 1 provided a specific number of text messages sent globally per minute, which was 15,220,700, and cited a source (Domo) for this information. The response also included reasons for the high number of text messages and the expected growth in the future. Assistant 2, on the other hand, focused on the challenges of determining the exact number of text messages sent per minute but did not provide any specific data or estimates. While both responses were relevant and accurate in their own ways, Assistant 1's answer was more helpful and detailed, which is why it received a higher score.", "score": [8.0, 6.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fUuJVJNRtdkKMKvrebQpLs", "question_id": 46, "answer1_id": "A4sDEGXpWaNVA39uRsDNjB", "answer2_id": "RTxk2jYsjYiiWMwcRjmfwu", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 gave a detailed explanation of the factors that can affect the number of words spoken per day and estimated that the average person speaks about 7,000 words per day. However, Assistant 1 did not provide an overall estimate of the total number of words spoken daily on Earth. Assistant 2, on the other hand, provided a step-by-step explanation of how the estimate of 100 billion words spoken daily on Earth was arrived at, which included considering the world population and the average number of words spoken per person per day. Assistant 2 also acknowledged the limitations of the estimate. Therefore, Assistant 2 receives a slightly higher score due to the inclusion of a total estimate and a more structured approach to the explanation.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "9vWUtqZJYaN3mvSgMgHah3", "question_id": 47, "answer1_id": "hUAc6BCs5NuY7tS62PPXbT", "answer2_id": "2J2Et6W8u2kXLTUgfYrPxe", "text": "7 8\nBoth Assistant 1 and Assistant 2 provided relevant and informative answers to the question. Assistant 1 provided a specific number (1 septillion) for the number of snowflakes that fall each year, which is interesting but not necessarily applicable to a \"typical winter\" as the question asked. Assistant 1 also provided some information on the factors that affect snowflake formation and their shapes. Assistant 2, on the other hand, focused more on the factors that influence the number of snowflakes and provided a step-by-step explanation of the process, which was more in line with the question's request. While both answers were helpful and accurate, Assistant 2's response was more relevant and detailed, which is why it receives a slightly higher score.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "9cL7dFKwuJmU7tGQDbPXvp", "question_id": 48, "answer1_id": "TMtjmb5tDizQ9RETRxpt2s", "answer2_id": "nSGoG9Z5g5VuCXgmQJJKrq", "text": "7 6\nAssistant 1 provided a more detailed response, considering factors such as self-published books and lost or destroyed books. They also provided a wider range of estimates for the total number of pages. However, their estimate of the total number of books ever published (129 million) was significantly higher than Assistant 2's estimate (13 million). Assistant 2's response was more concise and easier to follow, but it did not consider the factors mentioned by Assistant 1. Both assistants acknowledged the impossibility of providing an exact number, but Assistant 1's response was more comprehensive in its reasoning.", "score": [7.0, 6.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "YDnAUmn3rLo2fXnQNMsSWi", "question_id": 49, "answer1_id": "AwQMPhhaJ32ByA3VjKF5Ph", "answer2_id": "ScqJdUq9n5bp9qPv5WPqG5", "text": "7 8\nBoth assistants provided relevant and detailed information in their responses. Assistant 1 gave a clear explanation of the Earth's orbit and its importance for life on Earth. However, the answer contained an incorrect statement about the Earth orbiting the Sun 1 billion times since the first life arose, which contradicts the initial statement of 4.543 billion times. Assistant 2 provided a more accurate and detailed explanation, taking into account the elliptical nature of the Earth's orbit and the average distance between the Earth and the Sun. Assistant 2 also acknowledged the uncertainties in the timing of the emergence of life and variations in the Earth's orbit, making their answer more reliable.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "SdNHfEHe9iZHKbmYjkC5a4", "question_id": 50, "answer1_id": "UCqxXfMAnqJKbApsjrPFcq", "answer2_id": "GdLFr88pCwsGpfRBRQQkyh", "text": "8 7\nAssistant 1 provided a more detailed response by mentioning specific numbers from the US Copyright Office, Spotify, and Apple Music, which helps to give a better understanding of the scale of recorded songs. Assistant 2, while still providing a relevant response, focused more on the historical aspect of music and the development of recording technology, without providing specific numbers. Both assistants acknowledged the difficulty in estimating the exact number of songs recorded throughout history, but Assistant 1's response was more informative and precise.", "score": [8.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "6Tbxuai5ZBjbL7XgsGM59j", "question_id": 51, "answer1_id": "YMRg5Xi9BBvvqqny2tqJZ3", "answer2_id": "Sa7uNEbFGRFuXBAwau8sHS", "text": "8 7\nAssistant 1 provided a more focused and relevant response to the question, discussing the potential impact of the Internet on the Renaissance period in terms of intellectual growth, collaboration, education, and the arts. Assistant 2, while providing some interesting points about the technological context and the possible development of an early form of the Internet, did not directly address the question's focus on the impact of the Internet on the Renaissance period. Both assistants provided accurate information, but Assistant 1's response was more helpful and relevant to the user's question.", "score": [8.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "3xXeF3Goc2xSC9RJey8VNz", "question_id": 52, "answer1_id": "fhr7iMPb9SVJ663mXKBuct", "answer2_id": "T6Ar3cAEw3AmVVQTcCPp8N", "text": "8 9\nBoth assistants provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a good overview of the potential consequences of the Aztecs repelling the Spanish conquistadors, but Assistant 2 provided a more detailed response, discussing the potential impact on the Aztec civilization, the spread of European diseases, the economy of Mexico, and the possibility of forming alliances with other indigenous groups. Assistant 2's answer was more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "kGpPxDLcPjpJ8FsaBgWxeb", "question_id": 53, "answer1_id": "4rrmyZw9zhyJGqdrcazFvt", "answer2_id": "KiZQev5JEk2h6JYeQnFmtM", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the potential consequences of the Black Death not occurring in the 14th century. Assistant 1 focused on the decline of the population, the impact on society, and the changes in the way people lived. Assistant 2, on the other hand, provided a more detailed analysis of the potential demographic, economic, and social changes that could have occurred in the absence of the Black Death, as well as the possibility of other diseases spreading. Assistant 2's response was slightly more comprehensive and detailed, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "BUxwV6k4ktYY3rE7X4wH4B", "question_id": 54, "answer1_id": "Za3mY9xwxpZdPmGW48wtzu", "answer2_id": "cYiyYKKXM3GXkrZHAbX83S", "text": "7 8\nBoth Assistant 1 and Assistant 2 provided relevant and accurate responses to the question. Assistant 1 explored the potential outcomes of Newton focusing on biology, mentioning possible discoveries in medicine, mechanics of the human body, and causes of disease. However, Assistant 1 also considered the possibility that Newton might not have made significant discoveries in biology. Assistant 2 provided a more detailed response, discussing Newton's contributions to other fields and his wide range of interests. Assistant 2 also acknowledged the difficulty in predicting specific contributions without knowing more about Newton's interests in biology. Both responses were helpful, but Assistant 2's answer was more detailed and provided a broader perspective on Newton's scientific achievements, which is why it received a higher score.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "YS2v2hkjFoUNTRg9UAP67U", "question_id": 55, "answer1_id": "cbAaJS9ULjR4XYSHATujSG", "answer2_id": "PQmMUdAAcBsAWmWaTvdHSU", "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1 focused more on the cultural impact of the Beatles and how their absence might have affected the world, while Assistant 2 provided a list of possible outcomes if the Beatles had never formed. Assistant 1's response was slightly more detailed and touched on the emotional aspect of the Beatles' influence, which is why it receives a higher score. Assistant 2's response was also helpful, but it was more speculative and less detailed in comparison.", "score": [8.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "LeebBihTDtAoBf6uEBYdcz", "question_id": 56, "answer1_id": "ZEgb9fvopGo7HF5wPeoeHs", "answer2_id": "PorExChQ9VeYsPJptdgtsB", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a clear explanation of the importance of Turing's work and its impact on the outcome of the war. However, Assistant 2 provided a more nuanced response, discussing the potential alternative strategies and technologies that the Allies might have pursued without Turing's contributions. This additional information and consideration of alternative scenarios make Assistant 2's response slightly more detailed and comprehensive, resulting in a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "W6qgavnMLN53fEy5HvfxhF", "question_id": 57, "answer1_id": "igMXoEiszFM65ZS2KUTvtm", "answer2_id": "249f6dSMwZRZVMmtxv6yDm", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused more on the impact on Egypt's economy and national pride, while Assistant 2 discussed the broader implications on international trade, global economic development, and the history of the region. Assistant 2 also mentioned the engineering and technological advancements required for the construction of the canal, which added more depth to the answer. Therefore, Assistant 2 receives a slightly higher score due to the additional details and broader perspective provided.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "VDKdWNYB6NcbkiNA9eWXSJ", "question_id": 58, "answer1_id": "Up4h8RpgVVafBtUj4tiGPZ", "answer2_id": "nxa3m6kiAZwKgcMUBY8KYz", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both acknowledged the advanced nature of the Maya civilization and the potential impact it could have had on the world if it had not collapsed. Assistant 1 provided a good overview of the possible outcomes, but Assistant 2 went into more detail about the potential advancements and influence the Maya civilization could have had on other civilizations in the region. Assistant 2 also mentioned the lasting impact of the Maya civilization on the region and their descendants, which adds more depth to the answer. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "3KaALaanGsJQqzvwJFcnBL", "question_id": 59, "answer1_id": "CMg3ypKyykGUzQsDmX7i35", "answer2_id": "DXFvhjCaKqQiBsFDCkXGMT", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 discussed the possibility of other European explorers discovering the Americas and the potential for Chinese or Japanese settlements. Assistant 2 also mentioned the possibility of other European explorers discovering the Americas and focused more on the potential development of indigenous peoples without Columbus's discovery. Assistant 2's response was slightly more detailed and provided a clearer picture of the potential consequences of Columbus not discovering the Americas, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "h2qgd65upfZgKNUFsXgmHi", "question_id": 60, "answer1_id": "WhLiJXznzRBkzxDdnc5ndX", "answer2_id": "BCUK5VcpZK26jMJ2UMgR3E", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 explored the possibility of Van Gogh's success leading to a different impact on the art world and the potential influence of his tragic story on his art. Assistant 2 focused more on the potential changes in Van Gogh's art and personal life due to success and fame. Both responses acknowledged the impossibility of knowing the exact outcome. Assistant 2's response was slightly more detailed and well-rounded, considering the potential impact of fame and success on Van Gogh's personal life and art direction, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "WtY3uzgQLem7SJMRtmBuqc", "question_id": 61, "answer1_id": "Vzmhr5RMndkR866p4pUef6", "answer2_id": "JHTKBrLVTp7bv5zW2uwYVW", "text": "4 9\n\nAssistant 1:\nStrengths:\n- Polite and friendly response.\n\nWeaknesses:\n- Did not provide any code or solution to the given problem.\n- Misunderstood the task and offered unrelated services.\n\nSuggestions for improvement:\n- Focus on understanding the task and providing a relevant solution.\n- If unsure about the task, ask for clarification before providing an unrelated response.\n\nAssistant 2:\nStrengths:\n- Provided a working C++ program that solves the given problem.\n- Clear and concise comments explaining the code's logic and functionality.\n- Proper coding standards and best practices were followed.\n\nWeaknesses:\n- The program only counts lines containing the search string, not the actual number of occurrences of the word.\n\nSuggestions for improvement:\n- Modify the program to count the actual number of occurrences of the word in the file, not just the lines containing the word.\n- Consider handling edge cases, such as different letter cases or punctuation marks surrounding the search string.", "score": [4.0, 9.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "nnHxzRSqsuXx8A2hG2QZmE", "question_id": 62, "answer1_id": "jPrfhv9oaZGrkXxDUw5Vbt", "answer2_id": "jyzD2deb9JAffRhhkk37r5", "text": "8 4\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The algorithm is efficient with a time complexity of O(nm) and space complexity of O(mn).\n- The code correctly implements the given problem statement.\n\nWeaknesses:\n- The code lacks comments to explain the logic and functionality.\n\nSuggestions for improvement:\n- Add comments to explain the logic and functionality of the code.\n\nAssistant 2:\nStrengths:\n- The code includes comments that explain the code's logic and functionality.\n\nWeaknesses:\n- The code is less efficient than Assistant 1's code.\n- The code does not correctly implement the given problem statement. The initialization of the dp table is incorrect, and the logic for filling in the table is also incorrect.\n- The code is less readable and structured compared to Assistant 1's code.\n\nSuggestions for improvement:\n- Correct the initialization of the dp table by setting the first row and column to 0 instead of float('inf').\n- Correct the logic for filling in the table by following the dynamic programming approach used in Assistant 1's code.\n- Improve the code structure and readability by following the style used in Assistant 1's code.", "score": [8.0, 4.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "N6SA5JgsXewWz7hda37xer", "question_id": 63, "answer1_id": "KdZNrf6udTfCxGGcQxRbXk", "answer2_id": "jPGHEfp7FxhDyvRA6qEN3j", "text": "7 8.5\n\nAssistant 1:\nStrengths:\n- The code is simple and easy to understand.\n- The function is well-structured and returns a boolean value.\n\nWeaknesses:\n- The regular expression contains unnecessary spaces, which may cause issues in matching valid email addresses.\n- The regular expression does not cover all valid email address formats (e.g., special characters in the username).\n- There are no comments explaining the regular expression or the function.\n\nSuggestions for improvement:\n- Remove the spaces in the regular expression.\n- Update the regular expression to include more valid email address formats.\n- Add comments to explain the regular expression and the function.\n\nAssistant 2:\nStrengths:\n- The code is well-structured and easy to understand.\n- The regular expression covers a wider range of valid email address formats.\n- There are comments explaining the regular expression and the code.\n\nWeaknesses:\n- The regular expression does not cover all valid email address formats (e.g., special characters in the username).\n- The code does not include a function to check the email address, making it less reusable.\n\nSuggestions for improvement:\n- Update the regular expression to include more valid email address formats.\n- Encapsulate the email validation code in a function for better reusability.", "score": [7.0, 8.5], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "LKFYJtdK3e6mujtw8wSjdH", "question_id": 64, "answer1_id": "AFzqAfbYVL25DBqFA2cv4G", "answer2_id": "Ra9eTcAuNZPkPRnaHiQrak", "text": "7 4\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The dynamic programming approach is implemented correctly, using a table to store the results of previously solved subproblems.\n- The comments are clear and concise, explaining the logic and functionality of the code.\n\nWeaknesses:\n- The code does not handle the case when n = 0 or n = 1, which would result in an IndexError.\n- The code could be more efficient by using a memoization table with only two elements instead of a list of size n.\n\nSuggestions for improvement:\n- Add a condition to handle the cases when n = 0 or n = 1.\n- Use a memoization table with only two elements to save memory.\n\nAssistant 2:\nStrengths:\n- The code initializes a memoization table, which is a good start for a dynamic programming approach.\n\nWeaknesses:\n- The code does not actually use the memoization table, resulting in a less efficient solution.\n- The code does not handle the case when n = 0, which would result in an IndexError.\n- The comments are not as clear and concise as they could be, and they do not accurately describe the code's logic and functionality.\n\nSuggestions for improvement:\n- Implement the dynamic programming approach correctly by using the memoization table to store the results of previously solved subproblems.\n- Add a condition to handle the cases when n = 0 or n = 1.\n- Improve the comments to accurately describe the code's logic and functionality.", "score": [7.0, 4.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "MqumN4WHxBeQzbHE8XzKEc", "question_id": 65, "answer1_id": "Lt56JzWfXBxbvxrLf4fn3t", "answer2_id": "5mbEFLEnEx8Cm2wEF8WAbM", "text": "9 9\n\nBoth Assistant 1 and Assistant 2 provided correct and efficient implementations of the binary search algorithm. Their code is readable and well-structured. The time complexity of both implementations is O(log n), which is optimal for this problem.\n\nStrengths:\n- Both implementations are correct and follow the binary search algorithm.\n- The code is readable and well-structured in both submissions.\n- Both assistants provided a brief explanation of the algorithm and its time complexity.\n\nWeaknesses:\n- Assistant 1's implementation requires the user to provide low and high indices, which might be confusing for some users. Assistant 2's implementation is more user-friendly as it only requires the array and the target value as input parameters.\n- Neither assistant provided comments within the code to explain the logic and functionality. While the code is relatively simple and self-explanatory, adding comments would be helpful for users who are less familiar with the algorithm.\n\nSuggestions for improvement:\n- Assistant 1 could improve their implementation by removing the need for the user to provide low and high indices, similar to Assistant 2's implementation.\n- Both assistants should consider adding comments within the code to explain the logic and functionality, making it easier for users to understand the algorithm.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "ctTXTd9FC8vjGP9xFf6Gyb", "question_id": 66, "answer1_id": "bmaShWUyEqPE6kbHD6FjLk", "answer2_id": "KLbBzjbuwq4agAESQ9Wfpq", "text": "8 5\n\nAssistant 1:\nStrengths:\n- The implementation is correct and efficient.\n- The code is easy to read and understand.\n- The use of `__len__`, `__iter__`, and `__contains__` methods is a nice addition, making the class more versatile.\n\nWeaknesses:\n- The code lacks comments explaining the logic and functionality of the methods.\n\nSuggestions for improvement:\n- Add comments to explain the logic and functionality of the methods.\n\nAssistant 2:\nStrengths:\n- The code is readable and easy to understand.\n\nWeaknesses:\n- The implementation is incorrect. The `dequeue` method does not follow the logic of using two stacks to implement a queue.\n- The `enqueue` method clears the `out_stack`, which is unnecessary and inefficient.\n- The `peek` method returns the last item in the `in_stack`, which is not the front of the queue.\n- The `is_empty` method checks only the `in_stack`, which may lead to incorrect results if there are items in the `out_stack`.\n- The custom `EmptyQueueError` exception is not used effectively.\n\nSuggestions for improvement:\n- Correct the `dequeue` method to follow the logic of using two stacks to implement a queue.\n- Remove the unnecessary clearing of the `out_stack` in the `enqueue` method.\n- Modify the `peek` method to return the correct item at the front of the queue.\n- Update the `is_empty` method to check both stacks.\n- Use the `EmptyQueueError` exception effectively, or remove it if not needed.", "score": [8.0, 5.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "KoNz6J6gsCr6AgwW9rdyvK", "question_id": 67, "answer1_id": "hiSVom5YqSrKZKor8CjHdU", "answer2_id": "ganAwetd9VTzEAiTtsskzX", "text": "6 5\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The function has a clear docstring that explains its purpose, arguments, and return value.\n- The code uses a set to store the common elements, which ensures that duplicates are not included in the result.\n\nWeaknesses:\n- The problem statement specifically asked not to use any extra data structures, but the code uses a set.\n- The algorithm has a time complexity of O(n^2), which can be slow for large arrays.\n\nSuggestions for improvement:\n- To avoid using extra data structures, you can modify the code to directly print the common elements instead of storing them in a set.\n\nAssistant 2:\nStrengths:\n- The code follows the problem statement's requirement of not using any extra data structures.\n- The code is simple and easy to understand.\n\nWeaknesses:\n- The code does not include any comments or a docstring, which makes it harder for others to understand the purpose and functionality of the function.\n- The algorithm has a time complexity of O(n^2), which can be slow for large arrays.\n- The code does not handle duplicate elements in the input arrays, which may lead to duplicate common elements in the result.\n\nSuggestions for improvement:\n- Add comments and a docstring to the function to explain its purpose, arguments, and return value.\n- To handle duplicate elements, you can add a condition to check if the common element is already in the result list before appending it.", "score": [6.0, 5.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "m6AC23LQ8JYijV3GtsHskU", "question_id": 68, "answer1_id": "JRHeURg4TrVAgWNp5yqjFx", "answer2_id": "LG6szQ3cqNRzUjXtZtXG94", "text": "First, I will solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3 = 5(8) - 4 + 3 = 40 - 4 + 3 = 39\n\nNow, I will evaluate the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nf(2) = 5(2)^3 - 2(2) + 3 = 40 - 4 + 3 = 39\nThe steps and the final answer are correct.\n\nAssistant 2:\nf(2) = 5(2)^3 - 2(2) + 3 = 5 * 8 - 2 + 3 = 41 - 2 + 3 = 40\nThe steps are incorrect, and the final answer is wrong.\n\nScores:\nAssistant 1: 10\nAssistant 2: 4\n\nAssistant 1 performed better because they provided the correct steps and final answer, while Assistant 2 made mistakes in their calculations.", "score": [10, 4], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "34uzSiz6E2mtCxcqmAUF9P", "question_id": 69, "answer1_id": "hZwFKeRYBLKihBEWSEGMYJ", "answer2_id": "LHMnXGbSZubeZJcr5fgrte", "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n\nDistribute the 5 on the right-hand side of the equation:\n\n3x + 10 = 5x - 10\n\nSubtract 3x from both sides:\n\n10 = 2x - 10\n\nAdd 10 to both sides:\n\n20 = 2x\n\nDivide both sides by 2:\n\nx = 10\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2.\n\nAssistant 1:\n\nThe steps provided by Assistant 1 are correct, and the final answer of x = 5 is accurate.\n\nAssistant 2:\n\nThe steps provided by Assistant 2 are incorrect. The assistant made an error in step 2 by subtracting 5x from both sides, resulting in 3x = -5x, which is incorrect. The correct step should be subtracting 3x from both sides, resulting in 2x = 20. The rest of the steps are also incorrect due to this initial error.\n\nScores:\n\nAssistant 1: 10\nAssistant 2: 1\n\nAssistant 1 performed better and provided the correct solution to the equation.", "score": [10, 1], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "57H8uDw5kYEX2wFgvY8BN8", "question_id": 70, "answer1_id": "iG4SJq9xCKCDDtiqU3szJZ", "answer2_id": "TGENVWL5iAYi4NCf5jAzKy", "text": "First, I will solve the problem independently:\n\nGiven the endpoints of a line segment are (2, -2) and (10, 4), we can use the distance formula to find the length of the segment:\n\n$$d = \\sqrt{(x_2 - x_1)^2 + (y_2 - y_1)^2}$$\n\nSubstituting the given coordinates:\n\n$$d = \\sqrt{(10 - 2)^2 + (4 - (-2))^2} = \\sqrt{8^2 + 6^2} = \\sqrt{64 + 36} = \\sqrt{100} = 10$$\n\nThe length of the line segment is 10 units.\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe steps provided by Assistant 1 are correct, and the final answer is also correct. The length of the line segment is 10 units.\n\nAssistant 2:\nAssistant 2 made a mistake in calculating the difference between the x-coordinates and the y-coordinates. The correct differences should be 8 and 6, not 18 and 4. This led to an incorrect final answer of 17.973.\n\nScores:\nAssistant 1: 10\nAssistant 2: 4\n\nAssistant 1 performed better in this case, as they provided the correct steps and answer, while Assistant 2 made a mistake in the calculation.", "score": [10, 2], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "3bZ3BoLmb48KjWJ5q4NmMU", "question_id": 71, "answer1_id": "CjhV8cJ6UEp7Y4f3eWB6E5", "answer2_id": "kc9oytarvqyNVVfGZz93EL", "text": "9 9.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request for a formal email proposing a joint venture. Both responses included appropriate greetings, explanations of the proposal, and reasons for the joint venture. Assistant 1's response was slightly more concise, while Assistant 2's response provided a bit more detail about the structure of the proposed joint venture and the potential benefits. Both responses were of high quality, but Assistant 2's response was slightly more detailed, which is why it received a slightly higher score.", "score": [9.0, 9.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "nErSsdfQf8KAT2XtVLDqVh", "question_id": 72, "answer1_id": "5waLH9pZsr8jVTpy67QH58", "answer2_id": "NxstpvVEnGMny3jECtvrw2", "text": "9 10\nBoth assistants provided relevant and helpful resignation letter templates. Assistant 1's response was concise and to the point, while Assistant 2's response was more detailed and included additional information about ensuring a smooth transition and completing outstanding tasks. Assistant 2's response was slightly better due to the additional details and consideration for the employer's needs during the transition.", "score": [9.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Q4QCqD7baQ8PkmzaJ4qJv7", "question_id": 73, "answer1_id": "ZV4sUNujQGS2w7Wg4jbXbf", "answer2_id": "XN9NBpeQJiQqGT5UYkys7N", "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. They both structured a formal letter of recommendation for a student applying to a prestigious graduate program in computer science. Assistant 1's response was concise and well-organized, but Assistant 2's response was more detailed and included additional elements such as the sender's and recipient's affiliations, addresses, and date, which made it more complete and formal. Therefore, Assistant 2 receives a slightly higher score.", "score": [9.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "LVJ5BasrkhW8giXxYVqu9W", "question_id": 74, "answer1_id": "X94xm9m7tayWsKFNACqAG4", "answer2_id": "3qMqAZcfHUVUTEJnMXT8Pw", "text": "9 9.5\nBoth Assistant 1 and Assistant 2 provided relevant, accurate, and detailed responses to the user's request for a compelling product launch announcement email. Assistant 1's response was concise and focused on the benefits of the new software solution, while Assistant 2's response was more detailed, providing a list of key features and emphasizing the research and development behind the product. Both responses were helpful and well-structured. However, Assistant 2's response was slightly more compelling due to the additional details and emphasis on the product's innovation, which is why it receives a slightly higher score.", "score": [9.0, 9.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Pn2guBepbwfxhn2ev3JZoc", "question_id": 75, "answer1_id": "EFL7fU6gnicQY6DRpHvbTK", "answer2_id": "hMXjd228Mc8pnc7zgd2frG", "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request. Assistant 1's response was clear and concise, addressing the issue and providing reassurance that the problem has been resolved. Assistant 2's response was more detailed, including a subject line and additional information about the investigation and steps taken to prevent future delays. Both responses were well-written and appropriate for the situation, but Assistant 2's response was slightly more comprehensive, earning it a higher score.", "score": [9.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7NJp7H3YHHQdWZmyMdcHpi", "question_id": 76, "answer1_id": "XJLFjCr6vgQZjEXbaMjTa6", "answer2_id": "gUEd9nNepqy8nsfbypscHx", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the history and cultural significance of jazz. Assistant 1's response was more structured and detailed, covering the roots of jazz, its history, and its cultural significance, while Assistant 2's response was slightly less detailed but still informative and engaging. Assistant 1's script was more comprehensive, which is why it received a slightly higher score.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "MSCbf4yKFcdVNcQFDjYyAU", "question_id": 77, "answer1_id": "bz3rS9MRs9QmaLGg4A8a7B", "answer2_id": "nZZyn3SAvU8T2BVR73LMHC", "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about a trip to Hawaii, highlighting cultural experiences and must-see attractions. Assistant 1 gave a clear and concise list of suggestions for cultural experiences and must-see attractions, making it easy for readers to understand and plan their trip. Assistant 2, on the other hand, crafted a more engaging and immersive narrative, sharing personal experiences and emotions, which is more fitting for a travel blog post. While both responses were informative, Assistant 2's response was more engaging and better suited to the requested format, which is why it receives a slightly higher score.", "score": [9.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "dUUREscUNiAEwZMX2Bv8pf", "question_id": 78, "answer1_id": "kCS2ZqzKTdDpaXwu3TiFXP", "answer2_id": "YM4qjUM5VfmWnzM3HXRB4G", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided captivating movie reviews for recently released science fiction films. Assistant 1 focused on the movie \"Dune,\" providing a detailed review of the plot, characters, and special effects, as well as mentioning the film's score. Assistant 2 reviewed a fictional movie called \"Beyond the Horizon,\" also discussing the plot, characters, and special effects. Both reviews were engaging and informative. Assistant 1's response was slightly more detailed and specific, which is why it received a higher score of 9, while Assistant 2's response was still strong and received a score of 8.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Mfyo2MkTqFoXcoVRYgYMpC", "question_id": 79, "answer1_id": "7WWWS5UhWcVzkhTypqcFU4", "answer2_id": "WcbzNkzu5q66QkK92NxmGB", "text": "8 9\nBoth assistants provided relevant and detailed podcast scripts discussing the influence of streaming platforms on the music industry. Assistant 1's script was well-structured, covering the pros and cons of streaming platforms and their impact on the music industry. However, Assistant 2's script was slightly more engaging and organized, with clearly defined segments and a more conversational tone. Additionally, Assistant 2 touched on the future of streaming platforms, which added depth to the discussion. Overall, both scripts were helpful and accurate, but Assistant 2's script was more appealing and comprehensive.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "T59yNWEAj9kzX8k2QbWom4", "question_id": 80, "answer1_id": "QcSFqbgqLWqn96V6BvWT7B", "answer2_id": "Tfm4f2mbpKJsa8MT6exs8D", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided detailed and engaging symphony concert reviews that discussed the orchestra's performance and the overall audience experience. Both responses were relevant, accurate, and helpful in conveying the atmosphere and quality of the performances. The reason for giving both assistants the same score is that they both demonstrated a high level of detail and quality in their responses, making it difficult to differentiate between the two in terms of performance.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
diff --git a/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_gpt35_vicuna-13b.jsonl b/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_gpt35_vicuna-13b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..bb4da40c65b96274fed8b1df2c3454db7d6b5f8f
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_gpt35_vicuna-13b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "jyhS7AFj2mrFNqoRXQJDPS", "question_id": 1, "answer1_id": "BZGowHM7L3RvtWRktKZjLT", "answer2_id": "cV4zXygaNP6CXEsgdHMEqz", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information on improving time management skills. Assistant 1 provided a slightly more detailed response, including tips on minimizing distractions, learning to say 'no', and delegating tasks. Assistant 2 also provided useful tips, but the response was not as comprehensive as Assistant 1's. Both assistants emphasized the importance of setting goals, using a calendar or planner, and taking breaks. Overall, both responses were helpful, but Assistant 1's response was slightly more detailed and comprehensive.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "NVTHESK8sQGgYq5RmQTpMr", "question_id": 2, "answer1_id": "GsrPLfiNEcRDYAC5aUoY23", "answer2_id": "3zpPUeoVsPWXtKMV7cAhZ6", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective ways to deal with stress. Both answers included similar suggestions such as exercise, relaxation techniques, sleep, healthy diet, social support, time management, and seeking professional help. The level of detail in both responses was sufficient to provide a clear understanding of each strategy. Assistant 2 included an additional suggestion of relaxation techniques like progressive muscle relaxation, aromatherapy, and massage, which slightly enhanced the response. However, both assistants performed exceptionally well, and their answers were equally informative and helpful.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fDwSXjgkma7Gu4b8QGUaCY", "question_id": 3, "answer1_id": "5SGfexmPoJTWoVbPMB87KB", "answer2_id": "6xpiZJE4vxxGqjRogPfBk7", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the differences between Python and JavaScript. Assistant 1 provided a more structured response, covering programming paradigms, syntax, typing, and application areas, which made it easier to understand the key differences. Assistant 2 also covered similar points but included additional information about error handling, libraries, and speed. However, Assistant 2's response was slightly less organized, making it a bit harder to follow. Both assistants provided a good level of detail, but Assistant 1's response was more concise and easier to understand, which is why it receives a slightly higher score.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "cJw4V6atyX2JHA4FZM6Qc3", "question_id": 4, "answer1_id": "RcqQg364kgCkYPshqvmGSp", "answer2_id": "abitTVu2Dz8rdMxyThvFyJ", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information on increasing productivity while working from home. Assistant 1 offered a slightly more detailed response with 8 tips, while Assistant 2 provided 7 tips. Both assistants covered similar points, such as creating a dedicated workspace, setting a schedule, eliminating distractions, getting dressed, and taking breaks. However, Assistant 1 included the additional points of making a to-do list, challenging oneself, and staying positive, which contributed to a more comprehensive answer. Assistant 2's response was also well-rounded, but slightly less detailed compared to Assistant 1.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "2ZMgfaT49yqAiA3GYLNfp4", "question_id": 5, "answer1_id": "3R9jAndzLHQqJwmMi5mNox", "answer2_id": "UMZod8JaWia9KB2EVXdYrF", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate explanations of the basics of quantum computing. Assistant 1's response was slightly more detailed, covering different technologies used to build quantum computers and mentioning the importance of quantum computing in solving problems that are intractable for classical computers. Assistant 2's response was also informative, but it focused more on different approaches to building quantum computers rather than the broader context of quantum computing. Both responses were well-structured and informative, but Assistant 1's answer was more comprehensive, which is why it receives a slightly higher score.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Wz7U3tcQhAMK6HgLPwM7C3", "question_id": 6, "answer1_id": "Uw8SaYLPMGR5sdV9FDx8pb", "answer2_id": "WXRCqF34f4UUJzd3xbgAqT", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the differences between plant-based and animal-based protein sources. Assistant 1 provided a slightly more concise response, while Assistant 2 provided a more detailed response with a numbered list of differences. Assistant 1 mentioned the importance of animal-based protein sources for athletes and individuals with higher protein needs, which was a useful addition. Both assistants discussed the nutritional value, absorption, sustainability, health effects, and ethical considerations of the two types of protein sources. However, Assistant 1's response was more precise and to the point, making it easier to understand and digest the information, which is why it receives a slightly higher score.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "M3Yd3jLJrBzHL2KmmaiLmX", "question_id": 7, "answer1_id": "53gmokt2KBgKu6NMPopxcu", "answer2_id": "JREJbG5ZKXCNMoWoiJQxbC", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about developing critical thinking skills. Both answers included practical tips and strategies for improving critical thinking, such as asking questions, analyzing information, considering different perspectives, and practicing logic and reasoning. Both responses were detailed and well-organized, making it easy for the user to understand and apply the suggestions. The only minor difference is that Assistant 2 included a few additional tips, such as engaging in debates and practicing mindfulness. However, both answers are equally valuable and informative, so they both receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "D4dXgsgoBdNwqUxxpmUkmp", "question_id": 8, "answer1_id": "bKrbrGsN7zjKLvBk2h3tvo", "answer2_id": "mmVwmX6TGJ2Y72gCNac4EQ", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about the major challenges faced by the education sector today. Assistant 1 provided a clear list of seven challenges, while Assistant 2 expanded on these and included an additional challenge, making their response slightly more detailed. Both assistants covered important aspects such as access to education, funding, teacher shortages, technological integration, student engagement, and assessment. Assistant 2 also addressed equity and inclusion, as well as sustainability, which added value to their response. Overall, both assistants performed well, but Assistant 2 provided a more comprehensive answer.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ntSaBiMeRMC82i2S2wkHh6", "question_id": 9, "answer1_id": "HEGL3aPUnNrdNtNt3XLDKi", "answer2_id": "DMTZyzd4rRAFV43xtBJ9ns", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both covered the primary factors that influence consumer behavior, such as personal, psychological, social, and marketing factors. Assistant 1 mentioned situational factors, while Assistant 2 included economic and product/service factors. Both answers were detailed and informative, and it is difficult to determine which one is superior, as they both provide valuable insights. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "eTBH8zoQhZfYYCAhQh4moS", "question_id": 10, "answer1_id": "W9zpMVa2cJUJW8B2uGMCJy", "answer2_id": "dETAsj4xHnUCSTkZezz8aM", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective strategies for conflict resolution in the workplace. They both covered essential strategies such as active listening, clear communication, problem-solving, and seeking outside help or mediation if necessary. The level of detail in both responses was sufficient to understand the strategies and apply them in a workplace setting. Both assistants performed well, and it is difficult to differentiate their performance, so they both receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Y6cDAMFAEauqwsaGdHoraf", "question_id": 11, "answer1_id": "LacdmoweqKYGzt3aMBYjEa", "answer2_id": "C8ksZxg3LshMUWiLxPanbt", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the implications of using single-use plastic bottles versus reusable bottles on the environment and human health. Assistant 1's response was slightly more detailed and organized, with clear distinctions between environmental and health implications for both single-use and reusable bottles. Assistant 2 also provided valuable information, but the response was not as well-structured, and some points were not as clearly explained as in Assistant 1's response. Overall, both assistants performed well, but Assistant 1 had a slight edge in terms of clarity and organization.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "evnANWPnvUJ89vxV3sRZ7M", "question_id": 12, "answer1_id": "JqVreebbPuNdjw8E8K4Ssf", "answer2_id": "NeHhRc5P5uAU8eWSJBRkhG", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in their responses. Assistant 1's response was slightly more detailed, covering a broader range of factors such as sensory inclusivity and employee training, which were not mentioned by Assistant 2. Assistant 2's response was also comprehensive, but it lacked the mention of sensory inclusivity and employee training. Both assistants provided valuable information on accessibility features, route design, scheduling, and affordability. Overall, Assistant 1's response was slightly more detailed and comprehensive, earning a 9, while Assistant 2's response was also strong but slightly less detailed, earning an 8.5.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7X5LTBTBncxNXwdhDvknWG", "question_id": 13, "answer1_id": "hEMThhsN85Ud5X8xBv9BZJ", "answer2_id": "KAJ7UVwu8oCKyxZj9j82pm", "text": "9 8.5\nBoth assistants provided helpful, relevant, and accurate information about fiscal and monetary policies to combat economic recessions. Assistant 1's response was slightly more structured and concise, making it easier to understand the key points. Assistant 2's response was also informative and detailed, but the structure was less clear, and some points were repetitive. Both assistants covered the main aspects of fiscal and monetary policies, but Assistant 1's response was more precise and well-organized.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7FK5fbRY6p2ep2MpPjv3yH", "question_id": 14, "answer1_id": "BvFV7sx53PAK5bNn89urFs", "answer2_id": "NnWfaeRe8PmitgmV4u5fY8", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a clear explanation of how language and cultural barriers can impact communication and relationships in multicultural societies, and also mentioned the importance of language classes, cultural exchange programs, and sensitivity training. Assistant 2 provided a more detailed response, discussing specific aspects of communication and relationships that can be affected by language and cultural barriers, such as cultural norms, stereotypes, prejudice, and power dynamics. While both answers were informative, Assistant 2's response was slightly more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "csv7uSp3JKQTDajTge3gCW", "question_id": 15, "answer1_id": "dM5GHbLuPNfzUbBnJz6w7K", "answer2_id": "WiGpqKRRwWonwgU95piNNc", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful responses to the question. Assistant 1 focused on a specific scenario involving AI-powered chatbots for patient triage and automating routine tasks, which was a clear and concise example. Assistant 2, on the other hand, provided a broader overview of various ways AI could improve healthcare delivery, including data analysis, automating tasks, remote monitoring, personalized treatment plans, and speeding up research and development. Assistant 2's response was more comprehensive and covered a wider range of applications, which is why it received a slightly higher score. Both responses were accurate and detailed, but Assistant 2's answer provided a more extensive understanding of AI's potential impact on healthcare.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "AusuMoEsTd4zExWnGKz95b", "question_id": 16, "answer1_id": "BX7maaP5kGY6bBTLJRwkit", "answer2_id": "iangiZeex5ndxAXnWMrxBW", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the process of gene editing using CRISPR-Cas9 technology, its potential applications, and ethical implications. Assistant 1 provided a slightly more detailed response, discussing the potential for eugenics, unintended consequences, and issues of access and equity. Assistant 2 also covered the main points, but with a bit less detail on the ethical implications. Both assistants did a good job, but Assistant 1's response was slightly more comprehensive.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "dUmijornRYz6nnYGYnZtv2", "question_id": 17, "answer1_id": "STuX8oc7Gu3SN6EWzwpUpp", "answer2_id": "XnMRLphzYQX4QRNht7tbui", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both explained the concept of vaccinations and how they work to protect individuals and communities from infectious diseases. They also both provided a clear explanation of herd immunity and its importance in preventing the spread of diseases. The quality of the answers is quite similar, and both assistants deserve a high score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "nYNJvBeat7YrWwEeNjHZts", "question_id": 18, "answer1_id": "TFUUXWS7yn2u2b4n7eM3ZB", "answer2_id": "HZc37bwy646mRzbqSsDAob", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 gave a good overview of the role of social media platforms in news consumption and the implications of misinformation. However, Assistant 2 provided a more detailed response, discussing specific factors such as personalization, virality, amplification, filter bubbles, confirmation bias, and lack of fact-checking, which contributed to a better understanding of the issue. Therefore, Assistant 2 receives a slightly higher score due to the level of detail in their response.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "P4hakPhF7TKj55mTydH4NT", "question_id": 19, "answer1_id": "3yRq2XXPi83H7Rr5SZS9rE", "answer2_id": "iJrMatLrMdJyyqMx9uJ45a", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1's response was slightly more detailed, offering specific examples and strategies for promoting healthier diets, such as imposing taxes on unhealthy foods and increasing funding for community gardens and farmers' markets. Assistant 2 also provided a good response, with clear examples of how cultural, social, and economic factors influence food choices. However, Assistant 2's response was slightly less detailed in terms of strategies for promoting healthier diets. Both assistants performed well, but Assistant 1's response was more comprehensive and actionable.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "GbEY9PMrmhDNm5XUB3AYmc", "question_id": 20, "answer1_id": "Sw34dAwQPCfGGotwRwhvtv", "answer2_id": "oVEHqDnDTEADZSFfKgFTZd", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed explanations of the process of natural selection and how it contributes to the evolution and adaptation of species. Assistant 1 focused on explaining the essence of natural selection and its role as a driver of evolution and adaptation, while Assistant 2 provided a more structured explanation with numbered steps. Both explanations complement each other and provide a comprehensive understanding of the topic. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "JJ6BtrjfFvmjA9nvyJvNCL", "question_id": 21, "answer1_id": "cZw4Jw8Zyz6ZUy4WDsC6ta", "answer2_id": "WLAj4u59bj2oEXzahF79ek", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. They both included a sample introduction speech for a medieval knight at a royal banquet, emphasizing humility, loyalty, and service to the kingdom. The level of detail in both responses was appropriate and engaging. It is difficult to distinguish one as better than the other, as both responses effectively address the user's question, so they both receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "KNxHKpigcbEiptJeGiXtyd", "question_id": 22, "answer1_id": "nj9y2HTWFGsD5B278ozm73", "answer2_id": "fJPnM2XcRveW2zR4DDaeTb", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided relevant and engaging speeches to motivate a pirate crew to search for hidden treasure. Assistant 1's response was more concise and focused on the thrill of adventure and the rewards that await the crew. Assistant 2, on the other hand, started by clarifying that they do not condone piracy and then provided a speech that emphasized the challenges and the determination needed to succeed. Both speeches were well-crafted and detailed, but Assistant 1's response was slightly more direct and to the point, which is why it received a higher score.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "jsobbVWb4XgXruX5KGSAzP", "question_id": 23, "answer1_id": "Ep9rLcNzEGhG7AgbLTpRtm", "answer2_id": "fY4Xed6veWpcKfj5krZz8m", "text": "9 8\nBoth assistants provided relevant and creative soliloquies in the style of Shakespearean characters declaring their love. Assistant 1's soliloquy was slightly more engaging and used more vivid imagery, which made it stand out. Assistant 2's soliloquy was also well-written and relevant, but it lacked the same level of depth and emotional impact as Assistant 1's response. Both responses were accurate and detailed, but Assistant 1's soliloquy had a slight edge in terms of quality.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "nzQdRQqiGrCEPovYpkfcuq", "question_id": 24, "answer1_id": "oNULT72cYUvit7D9SHb5aM", "answer2_id": "LqFJA2JQbBXP77nkYjtrZK", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the user's question. They each shared their origin stories in a way that would be understandable and engaging for a curious child. Assistant 1 focused on the transformation from an ordinary person to a superhero, emphasizing the importance of courage and helping others. Assistant 2 explained their creation as an advanced artificial intelligence, highlighting their strong moral compass and desire to help others. Both answers were detailed and well-rounded, making it difficult to choose one over the other. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "bxv7oJi28Yny5CpxYtTriN", "question_id": 25, "answer1_id": "TX86xjPKTk2UxWwV4e8zRK", "answer2_id": "WxCKcRstJPXTXnqEzSX3Hg", "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the technological advancements in the year 3000. Assistant 1's response was more concise and organized, with a clear list format and a brief explanation of each advancement. Assistant 2's response was also informative, but it lacked the same level of organization and clarity as Assistant 1's response. Additionally, Assistant 1 mentioned the eradication of diseases like cancer and Alzheimer's and the reversal of the aging process, which are significant advancements that Assistant 2 did not mention. Overall, both assistants provided valuable information, but Assistant 1's response was slightly more effective in conveying the information.", "score": [8.0, 7.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "93SEJYNg3w6FJDC6CCVf3s", "question_id": 26, "answer1_id": "e5YFb6PojDThkcZdSH8kpC", "answer2_id": "dgPJ7DvtFHD7mxtRwzM3vW", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided detailed and engaging descriptions of the winning play in the final seconds of a championship game. They both captured the excitement and tension of the moment, while also highlighting the skill and determination of the players involved. Both responses were relevant, accurate, and helpful in answering the user's question. It is difficult to differentiate between the two in terms of quality, as both responses were well-written and effectively conveyed the thrilling nature of the game's conclusion. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "dhy48WZaBT7h7XUmY4Kt3Y", "question_id": 27, "answer1_id": "NnkbyoNFRHay4toKTFfaeE", "answer2_id": "ADX83sWvjJkewJX6JmYUzQ", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided detailed, relevant, and engaging descriptions of their respective signature dishes. They both showcased their creativity and passion for cooking, as well as their attention to detail in the preparation and presentation of the dishes. Both responses were equally helpful, accurate, and detailed, making it difficult to differentiate between the two in terms of overall performance. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "WmyJQvRV2wheGgHifaTX8o", "question_id": 28, "answer1_id": "Gpb8MuNU3Pt7k93dpRN9WM", "answer2_id": "ihNG3rwsrt95NDhCAFeSDR", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information about the summit of Mount Everest and the emotions a climber might experience. Assistant 1 gave a good description of the summit and the challenges faced during the climb. Assistant 2, however, provided a more vivid and detailed description of the emotions and the view from the top, making their response slightly more engaging and informative. Both assistants acknowledged their limitations as AI language models, but Assistant 2's response was more aligned with the user's question, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ZBjXjAUfmo636RD8ftGmoj", "question_id": 29, "answer1_id": "SYvkCCHBUZPd9DQuidZM8K", "answer2_id": "Gmhqf3z4LvVfwPNFJ89BKd", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the user's question. Assistant 1 provided a more structured response, listing the challenges faced by a Mars colonist and then describing the daily life and activities. Assistant 2 also provided a detailed response, focusing more on the daily routine and integrating the challenges faced within that routine. Assistant 1's response was slightly more comprehensive and organized, which is why it receives a higher score. However, both responses were informative and addressed the user's question effectively.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "m96t6EWjwebt3SBbVs8QKi", "question_id": 30, "answer1_id": "NjdsG8tYfrHMT5zGZPavk6", "answer2_id": "gSwkKJCn6qDnNZond2xVJ3", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided engaging and detailed responses to the user's question. They both described the character's survival strategies, allies encountered, and the importance of trust and instincts in a post-apocalyptic world. Both responses were relevant and accurate, with a good level of detail. It is difficult to differentiate between the two responses in terms of quality, as both assistants performed exceptionally well in addressing the user's question.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RsFZsrSQGvqkU9qRu6MzeE", "question_id": 31, "answer1_id": "8eovAhyvrKJEMWiVdYzByH", "answer2_id": "8RaBeMjxx2bCp2GKWv7YiP", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. They both offered multiple ways to determine if a restaurant is popular among locals or mainly attracts tourists, and they explained why this information might be useful. The level of detail in both responses is sufficient to guide the user in making informed decisions about where to dine. It's difficult to differentiate the quality of the two responses, as they both cover similar points and provide valuable information. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Do5xK3swjiBBXLCSxCZrJv", "question_id": 32, "answer1_id": "nvyaGEveLWBaxgXzriB93d", "answer2_id": "C65PZkmAfFfWRs4bPhyKqg", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed, with a clear list of seven clues to look for, while Assistant 2 provided six clues. Both assistants covered similar points, but Assistant 1's response was more organized and easier to follow. Assistant 2's response was also helpful and relevant, but slightly less detailed and organized compared to Assistant 1. Overall, both assistants performed well, but Assistant 1 had a slight edge in terms of clarity and organization.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "6coRp7diG94jbQfxFa2NTw", "question_id": 33, "answer1_id": "3xU2t6Yvx9EWpqfqvinNfH", "answer2_id": "4so4HTEjgDZKTqNAgkHHQX", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both covered the main reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app. The level of detail in both responses was sufficient to address the user's question. Assistant 1 provided a slightly more concise answer, while Assistant 2 elaborated a bit more on each point. However, both answers were of high quality and deserving of equal scores.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "neKDsPNtPp68GyPCK6C7wc", "question_id": 34, "answer1_id": "Mq6hzNziUxzQ2juPMDrv3h", "answer2_id": "FCYaiexEzdoLFPAwvTgDDm", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both mentioned key points such as body language, active listening, and follow-up as indicators of genuine interest in a conversation. Both responses were detailed and well-structured, making it easy for the reader to understand the points being made. It is difficult to differentiate between the two responses in terms of quality, as both assistants provided valuable information and covered the topic thoroughly. Therefore, both Assistant 1 and Assistant 2 receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fsikYyNM5HZSFuwtez49zW", "question_id": 35, "answer1_id": "KU6BNNN8d6MLHyrA8nV4DB", "answer2_id": "76EPQDh4ZNxBMGqED9LEFi", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both listed several reasons why someone might prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher. The reasons provided by both assistants were similar, with some slight variations in wording and the order of the points. Both responses were well-structured and easy to understand, making it difficult to differentiate between the two in terms of quality. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "6U9bZZbDKJLudgPjSVUJ6m", "question_id": 36, "answer1_id": "RpHbPLJamuknRRa3xU5bUF", "answer2_id": "cvBg3gyCyDuyESof3YXhTE", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1's response was slightly more detailed and organized, with a clear list of factors to consider when assessing the credibility of a source. Assistant 2's response also provided valuable information, but it was less structured and had some overlap with Assistant 1's points. Both assistants emphasized the importance of checking the author's credentials, publication reputation, objectivity, and supporting evidence. Assistant 1 also mentioned evaluating the tone and verifying the information using reputable sources, which added value to their response. Overall, both assistants performed well, but Assistant 1's response was slightly more comprehensive and well-organized.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "UUzrSYFRn2RZDBvpse2Bvg", "question_id": 37, "answer1_id": "AFR3AJW4sSPLDLiAUvrL8s", "answer2_id": "kRgfUJ7qqkyZUnLd2fnnaX", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a good general overview of the factors that may influence individual preferences for fear-inducing experiences, while Assistant 2 provided a more detailed list of possible explanations, including brain chemistry, life experiences, personality traits, cultural factors, and learning. Assistant 2's response was slightly more comprehensive and informative, which is why it received a higher score. However, both responses were of high quality and addressed the question effectively.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Pi4zzV2gr6xqvZdLZJEU7k", "question_id": 38, "answer1_id": "esqiBYHa56ygcPU2ux2Pdx", "answer2_id": "J3YuizKcHQ74ydNyCcwgwu", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 provided a more detailed response, offering specific examples of how observing behavior can provide clues about cultural norms and expectations, such as dress codes, social hierarchy, communication styles, and customs and traditions. Assistant 2 also provided a relevant answer, discussing the importance of observing behavior to understand cultural norms, but provided fewer specific examples. Both answers were useful, but Assistant 1's response was more comprehensive and detailed, which is why it received a higher score.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "eADSQUs3wpe48gow6zcWUf", "question_id": 39, "answer1_id": "NmuuKUipqt62QKuEHCuBWh", "answer2_id": "abWLpFojLpNPfDGHpuRSUG", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided thoughtful, nuanced, and detailed answers to the question. They both acknowledged the complexity of the issue and presented arguments for both sides, highlighting the benefits and risks of space exploration and addressing Earth's problems. Both assistants emphasized the importance of personal values and priorities in making a decision on this topic. The quality of the answers is very similar, and both assistants deserve a high score of 9 for their performance.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "PZv2G3p5PXWEXDQNRgzUJL", "question_id": 40, "answer1_id": "3HypDqXt6tHieMDN7hWYCh", "answer2_id": "Ki4fkJvsoSxuQeSoj2AcBG", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both emphasized the importance of striking a balance between job creation and technological progress. Assistant 1 provided examples of methods for promoting job creation, while Assistant 2 went a step further by discussing policies and programs aimed at creating new jobs in industries less susceptible to automation and retraining programs for displaced workers. Assistant 2 also mentioned the benefits of technological progress in various sectors, such as healthcare, transportation, communication, and energy production. This additional information and the more comprehensive approach give Assistant 2 a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "F3bgdb6Q7KPRcx5fkXYSDp", "question_id": 41, "answer1_id": "DmQtupeyNDrQFBccBRAsbD", "answer2_id": "GRQjYgAapNAVqzuZR4KQpf", "text": "7 5\nAssistant 1 provided a more accurate and detailed response, considering the average blink rate, waking hours, and lifespan. The explanation was clear and easy to follow. Assistant 2, on the other hand, made an error in calculating the number of blinks per day and provided a less accurate estimate. Additionally, Assistant 2's response was less organized and harder to follow.", "score": [7.0, 5.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "hAGDMdzkakkM7ZNFj4Se6h", "question_id": 42, "answer1_id": "froHv7kwRMYGWPXDQXk2Gw", "answer2_id": "SB5a2AiK224hB927vkqvYs", "text": "9 6\nAssistant 1 provided a more accurate and detailed response, using the average weight of a grain of salt and the molar mass of NaCl to calculate the number of atoms in a grain of salt. The explanation was clear and easy to follow, and the final result was reasonable. Assistant 2, on the other hand, made an incorrect assumption that the grain of salt is a sphere and used the diameter to calculate the volume. Additionally, the calculation of the concentration of atoms per unit volume was incorrect, leading to an inaccurate final result. While Assistant 2's response contained some useful information, it was less accurate and less helpful than Assistant 1's response.", "score": [9.0, 6.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "SuUVSbhzMDWFbSdwBEn7Si", "question_id": 43, "answer1_id": "ahktv9NqxZ2cYquTXwF42r", "answer2_id": "4XYycBrMiJgYKPof2P6xoe", "text": "9 7\nAssistant 1 provided a more detailed and step-by-step explanation of how the estimated number of lightning strikes per day was calculated, taking into account the number of thunderstorms, the percentage that produce lightning, and the average number of strikes per thunderstorm. The final estimate of 1,972,600 lightning strikes per day was also accompanied by a note that some estimates range from 6 million to 100 million, acknowledging the variability in the data. Assistant 2, on the other hand, gave a more general explanation and provided an estimate of 100,000,000 lightning strikes per day without a clear calculation process. While both answers were relevant and accurate to some extent, Assistant 1's response was more helpful, precise, and detailed, which is why it received a higher score.", "score": [9.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "kYsPQzYx3t8JAwKkNgXUZH", "question_id": 44, "answer1_id": "kqqPRaFqb3w9Ky9LGB3yKU", "answer2_id": "fpoXWHHdpv7XNBTS2NRY6d", "text": "8 7\nAssistant 1 provided a more detailed and step-by-step explanation of the calculations needed to estimate the number of balloons required to lift a house like in the movie \"Up.\" They considered the weight of the house, the lifting capacity of a single balloon, and the total lifting capacity needed. They also acknowledged the limitations of the calculation, such as the weight of the balloon material and the need for proper organization to lift the house evenly. Assistant 2, on the other hand, provided a less detailed explanation and used different assumptions for the weight of the house and the lifting power of the balloons. While both assistants concluded that it is not feasible to lift a house with balloons in real life, Assistant 1's response was more helpful, relevant, and accurate due to the detailed calculations and considerations provided.", "score": [8.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "dqbgKEeqNCuuTBhg5ovqfJ", "question_id": 45, "answer1_id": "946tQg8kS7GYPSm4qcV6Pt", "answer2_id": "GRXWdnzLLzmeZRU67PMTb7", "text": "8 6\nAssistant 1 provided a more helpful and detailed response by offering an estimate of the number of text messages sent globally in a minute, which was the main focus of the question. The assistant also explained the calculation process and acknowledged that the number is an estimate and may vary due to different factors. Assistant 2, on the other hand, focused on the difficulties of determining the exact number of text messages sent in a minute and did not provide an estimate or a calculation. While Assistant 2's response was relevant and accurate in terms of the challenges in tracking text messages, it did not directly address the user's request for an explanation of the reasoning behind an estimate.", "score": [8.0, 6.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "DeEzTYEsG2B8nubmhaF4qc", "question_id": 46, "answer1_id": "cU3wut3Ta3ySbRHGxfwgjc", "answer2_id": "RTxk2jYsjYiiWMwcRjmfwu", "text": "8 6\nAssistant 1 provided a more detailed and step-by-step explanation of the estimation process, using a reasonable assumption of 2,000 words per person per day and explaining the limitations of the estimate. Assistant 2's estimate of 10,000 to 20,000 words per person per day seems too high, and the final estimate of 100 billion words per day is significantly lower than Assistant 1's estimate. Assistant 1's response is more helpful, relevant, and accurate, while Assistant 2's response is less detailed and less accurate.", "score": [8.0, 6.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "nvDTZFCrXe6NSdcFqYWJDv", "question_id": 47, "answer1_id": "hQP784Ch2yq2b3BaXVBVX3", "answer2_id": "2J2Et6W8u2kXLTUgfYrPxe", "text": "8 7\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information in their responses. Assistant 1 gave a more precise estimation of the number of snowflakes by referring to the average snowfall in the United States and considering the size and shape of snowflakes. Assistant 2, while providing a detailed explanation of the factors affecting snowflake formation, did not provide a clear estimation of the number of snowflakes. Assistant 1's response was more focused on answering the question, while Assistant 2 provided more background information. Both responses were accurate and detailed, but Assistant 1's answer was more relevant to the question, which is why it received a higher score.", "score": [8.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "3S8Z26nA4rbyND5nJKqFWo", "question_id": 48, "answer1_id": "a92bStUFdq4LBcv3pa9y3Z", "answer2_id": "nSGoG9Z5g5VuCXgmQJJKrq", "text": "8 7\nAssistant 1 provided a more detailed response, including an estimation of the total number of books ever written and the average number of pages per book. Assistant 1 also acknowledged the limitations of the estimation and the fact that new books are being written and published every day. Assistant 2's response was also relevant and helpful, but it provided a lower estimate for the total number of books ever published and did not mention the limitations of the estimation as clearly as Assistant 1. Both assistants provided reasonable estimates, but Assistant 1's response was more comprehensive and informative.", "score": [8.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "d79neqK6LiNCsSu7yiWNZY", "question_id": 49, "answer1_id": "a2QAcAm9wJeP2BpyWQnhot", "answer2_id": "ScqJdUq9n5bp9qPv5WPqG5", "text": "9 7\nAssistant 1 provided a clear and concise explanation, directly addressing the question and using the correct time frame of 3.8 billion years ago for the beginning of life on Earth. The answer was easy to follow and included a reasonable approximation of the number of orbits. Assistant 2, on the other hand, used the age of the Earth (4.5 billion years) instead of the beginning of life (3.8 billion years) for the calculation, which made the answer less accurate and relevant to the question. However, Assistant 2 did provide some additional information about the Earth's orbit, which added some value to the response. Overall, Assistant 1's response was more helpful, relevant, and accurate, while Assistant 2's response was less accurate but still provided some useful information.", "score": [9.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "KFEzPSHMSp88iZ4MkBEPQQ", "question_id": 50, "answer1_id": "CrmusnxbTtGXF2varfcUd4", "answer2_id": "GdLFr88pCwsGpfRBRQQkyh", "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question, explaining that it is impossible to determine the exact number of songs recorded throughout history. Assistant 1 provided a more detailed response, discussing the oldest known musical instrument, the Library of Congress's collection, and the importance of considering non-Western music. Assistant 2 also mentioned the invention of recording technology and the rise of digital music but provided fewer specific examples and details. Therefore, Assistant 1 receives a score of 8, and Assistant 2 receives a score of 7.", "score": [8.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "MaeeytBPffy2qXeTWAgVb3", "question_id": 51, "answer1_id": "J9pZp6z2UUW7YcXgzUouqs", "answer2_id": "Sa7uNEbFGRFuXBAwau8sHS", "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and thoughtful answers to the question. Assistant 1's response was more focused on the potential impact of the internet on the Renaissance period, discussing how it could have affected intellectual exchange, collaboration, and the democratization of knowledge. Assistant 2, on the other hand, emphasized the technological limitations of the era and the differences between the Renaissance and the digital age. While both responses were helpful and accurate, Assistant 1's answer was slightly more detailed and directly addressed the question, which is why it receives a higher score of 8, while Assistant 2 receives a 7.", "score": [8.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7YnF7KE5b7DmdJVd2CWhPg", "question_id": 52, "answer1_id": "67bYUQb6zru8ofiub7uNUi", "answer2_id": "T6Ar3cAEw3AmVVQTcCPp8N", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a good overview of the potential consequences of the Aztecs repelling the Spanish conquistadors, touching on the impact on the Aztec empire, the indigenous people of the Americas, and the potential interest of other European nations. Assistant 2, however, provided a slightly more detailed response, discussing the Aztec civilization's strengths, the potential resistance to European diseases, the possible maintenance of cultural and religious practices, and the potential for alliances with other indigenous groups. Both answers were informative, but Assistant 2's response was more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "bAfAwLcXniXktiqNsvDGFV", "question_id": 53, "answer1_id": "gAisnQTHWFLW8aa5fQPNJf", "answer2_id": "KiZQev5JEk2h6JYeQnFmtM", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided relevant, accurate, and detailed responses to the question. Assistant 1's response was slightly more helpful, as it touched upon the impact of the Black Death on the medical field, hygiene practices, and cultural landscape, which Assistant 2 did not mention. Assistant 2's response was still informative, discussing the potential effects on demography, economy, and society, but it lacked the additional context provided by Assistant 1.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "8vUuWHUHuEn2SUrzRcoV6v", "question_id": 54, "answer1_id": "4ZJCbj7T8BGzNhDqz7NSF4", "answer2_id": "cYiyYKKXM3GXkrZHAbX83S", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the potential impact of Newton's work on biology and the possible consequences for the field of physics. Assistant 2, on the other hand, provided a more detailed overview of Newton's contributions to various fields and discussed the difficulty in predicting the specific contributions he might have made to biology. Assistant 2's response was more comprehensive and provided a broader context, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "FCJoqPdZYVBmAfS3VjFCkC", "question_id": 55, "answer1_id": "c6ixri3qqLfSBBnwMkgYB7", "answer2_id": "PQmMUdAAcBsAWmWaTvdHSU", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the overall impact of the Beatles on music and society, while Assistant 2 went into more detail about the possible outcomes if the Beatles had never formed. Assistant 2's answer was more comprehensive, offering specific scenarios and covering various aspects of the music industry and culture, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "bSZUNocHnjSSsxyUBMSUMu", "question_id": 56, "answer1_id": "c9AtDn7eeSYhtH854MQDDB", "answer2_id": "PorExChQ9VeYsPJptdgtsB", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 provided a slightly more detailed response, mentioning the impact of Turing's work on the development of computer technology and artificial intelligence, which adds value to the answer. Assistant 2, on the other hand, touched upon the possibility of alternative strategies and technologies being developed by the Allies, but did not go into as much detail as Assistant 1. Both assistants acknowledged the difficulty in predicting the exact outcome of the war without Turing's contributions, which is important to consider. Overall, both responses were informative and well-structured, but Assistant 1 provided a slightly more comprehensive answer.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "f3KTRaNot8TePqUPATMhRG", "question_id": 57, "answer1_id": "jYd2gg6MJH8hdqFSAJTaiR", "answer2_id": "249f6dSMwZRZVMmtxv6yDm", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a clear overview of the consequences of not having the Suez Canal, touching on the impact on shipping routes, international trade, and the development of the region. Assistant 2, however, went into more detail about the longer and more treacherous route around the Cape of Good Hope, the impact on international trade, and the historical context of European colonization in Asia. Assistant 2 also mentioned the engineering and technological advancements required for the construction of the canal and its role in international conflicts. While both answers were informative, Assistant 2 provided a more comprehensive response, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "J5EKWhvGBjYM9kSttb7RBp", "question_id": 58, "answer1_id": "nZJ6LGJFegnHetutiAQtFm", "answer2_id": "nxa3m6kiAZwKgcMUBY8KYz", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. They both discussed the potential advancements in various fields such as science, technology, and governance that the Maya civilization could have made if they had not collapsed. Both assistants also mentioned the possible impact on the colonization of the Americas and the influence on neighboring civilizations. The level of detail in both responses is quite similar, and both assistants acknowledged the difficulty in predicting the exact outcomes. Therefore, both Assistant 1 and Assistant 2 receive a score of 9 for their overall performance.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "jEWD6eqwH9jscVgJKDNDUg", "question_id": 59, "answer1_id": "dmEgLyeYNcwBZWHBak6Lap", "answer2_id": "DXFvhjCaKqQiBsFDCkXGMT", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 mentioned the Vikings and the possibility of indigenous peoples from Asia migrating to the Americas, while Assistant 2 focused more on the potential consequences of Columbus not discovering the Americas. Assistant 2's response was slightly more detailed and provided a clearer picture of the potential impact on the world, which is why it received a higher score. However, both responses were informative and addressed the question well.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RG6quoUjP9RfEhB7SBeQWU", "question_id": 60, "answer1_id": "bkuECkuy7YiDUcj9oJjdrZ", "answer2_id": "BCUK5VcpZK26jMJ2UMgR3E", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 went into more detail about the potential impact on van Gogh's personal life, such as his mental health and financial situation, as well as the broader implications for art history. Assistant 2 also discussed the potential impact on van Gogh's art and personal life but did not delve as deeply into the broader implications for art history. Both assistants provided a well-rounded response, but Assistant 1's answer was slightly more detailed and comprehensive, which is why it received a higher score.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "KKYYAMWpuxxqpPCxYRjr3s", "question_id": 61, "answer1_id": "2PRjT7j3V6487xZREfQfuD", "answer2_id": "JHTKBrLVTp7bv5zW2uwYVW", "text": "8 6\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The function `count_occurrences()` is modular and can be reused.\n- Proper error handling is implemented when opening the file.\n- The code handles case sensitivity by converting the text to lowercase.\n- Comments are clear and concise, explaining the code's logic and functionality.\n\nWeaknesses:\n- The code does not handle punctuation marks and special characters when comparing words.\n\nSuggestions for improvement:\n- To handle punctuation marks and special characters, you can add a function to remove them from the words before comparing them with the search word.\n\nAssistant 2:\nStrengths:\n- The code is simple and easy to understand.\n- The program uses the `find` function to search for the given word in each line.\n\nWeaknesses:\n- The code does not handle case sensitivity.\n- The code does not handle punctuation marks and special characters when comparing words.\n- The code does not count multiple occurrences of the word in the same line.\n- The code lacks comments explaining the logic and functionality.\n- The code is not modular, making it harder to reuse the counting functionality.\n\nSuggestions for improvement:\n- To handle case sensitivity, convert the text and search word to lowercase before comparing.\n- To handle punctuation marks and special characters, remove them from the words before comparing them with the search word.\n- Use a loop to count multiple occurrences of the word in the same line.\n- Add comments to explain the code's logic and functionality.\n- Consider creating a separate function for counting occurrences to improve modularity.", "score": [8.0, 6.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "Be9fbF7prWfkJiLcaRNjLP", "question_id": 62, "answer1_id": "jWyN8NTdVix6CUoqfbRqVx", "answer2_id": "jyzD2deb9JAffRhhkk37r5", "text": "8 4\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to understand.\n- The implementation correctly solves the problem using dynamic programming.\n- The comments are clear and concise, explaining the logic and functionality of the code.\n\nWeaknesses:\n- There are no comments explaining the initialization of the 2D array 'm'.\n- The code could be more efficient by not initializing the 2D array with all zeros and then updating it.\n\nSuggestions for improvement:\n- Add comments to explain the initialization of the 2D array 'm'.\n- Consider initializing the 2D array 'm' with the appropriate values instead of all zeros.\n\nAssistant 2:\nStrengths:\n- The code is structured and readable.\n- The implementation attempts to solve the problem using dynamic programming.\n\nWeaknesses:\n- The code does not correctly implement the given problem statement, as it returns the minimum edit distance instead of the longest common subsequence.\n- The comments are not as clear and concise as they could be, making it harder to understand the code's logic and functionality.\n- The code initializes the 2D array with `float('inf')`, which is unnecessary and inefficient.\n\nSuggestions for improvement:\n- Modify the code to correctly implement the longest common subsequence problem.\n- Improve the comments to better explain the code's logic and functionality.\n- Consider initializing the 2D array with more appropriate values instead of `float('inf')`.", "score": [8.0, 4.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "nD8SHmwjdEoLfodK7i8a9f", "question_id": 63, "answer1_id": "mx9G7gfKTCXCmNRaiMZQr9", "answer2_id": "jPGHEfp7FxhDyvRA6qEN3j", "text": "8 8\n\nBoth Assistant 1 and Assistant 2 provided a working solution for validating email addresses using regular expressions in Python. Their code is accurate, efficient, and easy to understand. Both solutions are very similar, with only minor differences in variable names and print statements.\n\nStrengths:\n- Both solutions correctly implement the given problem statement.\n- The code is accurate and efficient in both cases.\n- Both solutions include clear and concise comments that explain the code's logic and functionality.\n\nWeaknesses:\n- Both solutions use a simple regular expression that may not cover all edge cases for email validation. However, this is acceptable for a basic email validation implementation.\n\nSuggestions for improvement:\n- Both solutions could benefit from a more comprehensive regular expression that covers more edge cases for email validation. However, this is not a requirement for the given problem statement.\n- Assistant 2's solution could be improved by wrapping the email validation code in a function, similar to Assistant 1's solution. This would make the code more modular and reusable.", "score": [8.0, 8.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "H9nS4Gsoe6nK5q2ebuhRk7", "question_id": 64, "answer1_id": "8LjpXk6Va5L2FBEwTU9YJ6", "answer2_id": "Ra9eTcAuNZPkPRnaHiQrak", "text": "8 4\n\nAssistant 1:\nStrengths:\n- The code is simple and easy to understand.\n- The dynamic programming approach is implemented correctly using a list to store Fibonacci numbers.\n- The code is efficient and will work well for large values of n.\n- The example usage is helpful for understanding the function's usage.\n\nWeaknesses:\n- The code could benefit from more comments explaining the logic and functionality.\n\nSuggestions for improvement:\n- Add comments to explain the logic and functionality of the code.\n\nAssistant 2:\nStrengths:\n- The code attempts to use dynamic programming with a memoization table.\n- The base case for n = 0 and n = 1 is handled correctly.\n\nWeaknesses:\n- The code does not actually use the memoization table, making it inefficient for large values of n.\n- The recursive calls to the function do not use the memoization table, leading to redundant calculations.\n- The code structure is more complex than necessary.\n\nSuggestions for improvement:\n- Use the memoization table to store and retrieve previously calculated Fibonacci numbers.\n- Simplify the code structure by removing the unnecessary memoization table initialization and using a list to store Fibonacci numbers, similar to Assistant 1's approach.", "score": [8.0, 4.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "AeohqWsGLDJmqX83xZmNPF", "question_id": 65, "answer1_id": "BLC8fhAUeahFfrjBRiL9tx", "answer2_id": "5mbEFLEnEx8Cm2wEF8WAbM", "text": "9 8.5\n\nBoth Assistant 1 and Assistant 2 provided correct and efficient implementations of the binary search algorithm. Their code is readable and well-structured. However, there are a few differences in their submissions.\n\nAssistant 1:\n- The code is clear and easy to understand.\n- The example usage provided is helpful for understanding how to use the function.\n- The variable names \"left\" and \"right\" are more descriptive than \"start\" and \"end\" used by Assistant 2.\n\nAssistant 2:\n- The code is also clear and easy to understand.\n- The explanation of the algorithm is more detailed, which is helpful for understanding how the algorithm works.\n- The variable names \"start\" and \"end\" are less descriptive than \"left\" and \"right\" used by Assistant 1.\n\nSuggestions for improvement:\n- Assistant 1 could provide a more detailed explanation of the algorithm, similar to Assistant 2.\n- Assistant 2 could use more descriptive variable names, such as \"left\" and \"right\", as used by Assistant 1.\n- Both assistants could add comments within the code to explain the logic and functionality of each step.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "PhJVYMN4t8GbM7hsxJm47V", "question_id": 66, "answer1_id": "NAH245JGvMWF24FWZJagYp", "answer2_id": "KLbBzjbuwq4agAESQ9Wfpq", "text": "8 5\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The implementation is correct and efficient.\n- The comments are helpful and explain the code's logic and functionality.\n- The code adheres to proper coding standards and best practices.\n\nWeaknesses:\n- The code could benefit from a few more comments, especially for the `enqueue` and `dequeue` methods.\n\nSuggestions for improvement:\n- Add more comments to explain the logic behind the `enqueue` and `dequeue` methods.\n\nAssistant 2:\nStrengths:\n- The code is readable and has a clear structure.\n- The `peek` and `is_empty` methods are implemented correctly.\n\nWeaknesses:\n- The implementation of the `enqueue` and `dequeue` methods is incorrect, as it does not use two stacks to implement the queue.\n- The code does not adhere to proper coding standards and best practices, as it uses a mix of Python and C++ syntax.\n- The comments are not as helpful as they could be, and there are no comments for the `peek` and `is_empty` methods.\n\nSuggestions for improvement:\n- Correct the implementation of the `enqueue` and `dequeue` methods to use two stacks.\n- Ensure that the code uses consistent syntax, preferably Python syntax.\n- Add more comments to explain the logic behind the `peek` and `is_empty` methods.", "score": [8.0, 5.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "gJz8gj8qVa2Co9HSotcUUQ", "question_id": 67, "answer1_id": "NKXTwB3WtfFcmQZbTxBP5o", "answer2_id": "ganAwetd9VTzEAiTtsskzX", "text": "7 5\n\nAssistant 1:\nStrengths:\n- The code is easy to read and understand.\n- The function works correctly for the given problem statement.\n- The example provided is helpful for understanding the code's functionality.\n\nWeaknesses:\n- The code uses an extra data structure (the 'common' list) which is against the problem statement's requirement.\n- The nested loops can be inefficient for large arrays.\n\nSuggestions for improvement:\n- To avoid using an extra data structure, you can print the common elements directly instead of appending them to a list.\n- Consider using a more efficient algorithm, such as sorting the arrays and using two pointers to find common elements.\n\nAssistant 2:\nStrengths:\n- The code is easy to read and understand.\n- The example provided is helpful for understanding the code's functionality.\n\nWeaknesses:\n- The code uses an extra data structure (the 'result' list) which is against the problem statement's requirement.\n- The nested loops can be inefficient for large arrays.\n- The code does not handle duplicate elements in the arrays, resulting in duplicate common elements in the output.\n\nSuggestions for improvement:\n- To avoid using an extra data structure, you can print the common elements directly instead of appending them to a list.\n- Consider using a more efficient algorithm, such as sorting the arrays and using two pointers to find common elements.\n- Add a check to ensure that duplicate common elements are not printed.", "score": [7.0, 5.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "HnmQ6NkCSS8sumGzBnwkgo", "question_id": 68, "answer1_id": "5P7wPyrNwBrHNdJNrETS5h", "answer2_id": "LG6szQ3cqNRzUjXtZtXG94", "text": "First, I will solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\n\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, I will evaluate the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 1\nf(2) = 39\n\nAssistant 2:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5 * 8 - 2 + 3\nf(2) = 41 - 2 + 3\nf(2) = 40\n\nAssistant 1 has the correct answer and followed the correct steps. Assistant 2 made a mistake in the second step, where they incorrectly calculated 5 * 8 - 2 as 41 instead of 40. This led to an incorrect final answer of 40.\n\nScores:\nAssistant 1: 10\nAssistant 2: 6\n\nAssistant 1 performed better in this case.", "score": [10, 6], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "c5XdU8rAprsY8V5j7CGAe2", "question_id": 69, "answer1_id": "UmnL4WoucBduFpX4jptkNU", "answer2_id": "LHMnXGbSZubeZJcr5fgrte", "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n\n3x + 10 = 5x - 10 (use the distributive property)\n\n-2x = -20 (subtract 3x from both sides)\n\nx = 10 (divide both sides by -2)\n\nTherefore, x = 10.\n\nNow, I will evaluate the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe steps provided by Assistant 1 are correct, and the final answer is x = 10.\n\nAssistant 2:\nThe steps provided by Assistant 2 are incorrect. The assistant made an error in step 2 by subtracting 5x from both sides, which resulted in an incorrect equation. The rest of the steps are also incorrect due to this initial error.\n\nScores:\nAssistant 1: 10\nAssistant 2: 1\n\nAssistant 1 performed better and provided the correct solution to the problem.", "score": [10, 1], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "3T6zyEyabtCSmED9WgtDSm", "question_id": 70, "answer1_id": "Hymm673GkY5tXNaTFeNUBZ", "answer2_id": "TGENVWL5iAYi4NCf5jAzKy", "text": "First, I will solve the problem independently:\n\nThe distance formula is given by:\n\ndistance = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nPlugging in the coordinates of the given endpoints, we get:\n\ndistance = sqrt((10 - 2)^2 + (4 - (-2))^2)\ndistance = sqrt(8^2 + 6^2)\ndistance = sqrt(64 + 36)\ndistance = sqrt(100)\ndistance = 10\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe steps followed by Assistant 1 are correct, and the final answer is also correct.\n\nAssistant 2:\nThe steps followed by Assistant 2 are incorrect. The mistake occurs in the calculation of the differences between the coordinates:\n\nIncorrect: sqrt((10 - 2)^2 + (4 - (-2))^2) = sqrt(18^2 + 4^2)\nCorrect: sqrt((10 - 2)^2 + (4 - (-2))^2) = sqrt(8^2 + 6^2)\n\nThe final answer provided by Assistant 2 is incorrect.\n\nScores:\nAssistant 1: 10\nAssistant 2: 4\n\nAssistant 1 performed better in this case, as they followed the correct steps and provided the correct answer. Assistant 2 made a calculation mistake in the distance formula, leading to an incorrect answer.", "score": [10, 4], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "2FNQ97Hnop7zu525REHpPh", "question_id": 71, "answer1_id": "PUzddJ35E3WsM7BDQ9E59i", "answer2_id": "kc9oytarvqyNVVfGZz93EL", "text": "9 9.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request for a formal email proposing a joint venture. Both responses included appropriate greetings, introductions, and explanations of the proposed partnership. Assistant 2's response was slightly more detailed, including the proposal of establishing a new jointly-owned company and suggesting a meeting to discuss the proposal further. However, both responses were of high quality and would be useful to the user.", "score": [9.0, 9.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "jbBS3mJXgYxGnnjhamALro", "question_id": 72, "answer1_id": "6Q72hZCtDkgq379yhdmN5N", "answer2_id": "NxstpvVEnGMny3jECtvrw2", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate resignation letter samples. They both addressed the user's request to leave on good terms and express gratitude for the opportunities provided. The level of detail in both responses is appropriate, and both letters are well-structured and professionally written. It is difficult to differentiate between the two responses in terms of quality, as both are excellent examples of resignation letters.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "TzDXKHsob9xTKFdvCMvyVn", "question_id": 73, "answer1_id": "ReXnHy9C8SwcYPAep6gvJg", "answer2_id": "XN9NBpeQJiQqGT5UYkys7N", "text": "9 9\n\nBoth Assistant 1 and Assistant 2 provided well-structured, detailed, and relevant formal letters of recommendation for a student applying to a prestigious graduate program in computer science. Both letters included essential elements such as the sender's and recipient's information, a clear introduction, a thorough explanation of the student's academic achievements, and a strong conclusion. Both letters were accurate, relevant, and helpful in addressing the user's request. Therefore, I believe both assistants deserve a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "X2tMhW8rQLckXYViCjz5ZM", "question_id": 74, "answer1_id": "cKk5zZe8yYY4JH3kr5pGXG", "answer2_id": "3qMqAZcfHUVUTEJnMXT8Pw", "text": "8 9\nBoth assistants provided relevant and detailed responses to the user's request. Assistant 1's email was well-structured and included benefits and features of the software solution, as well as a special introductory offer. However, Assistant 2's response was slightly more precise, as it mentioned the target audience and market, and provided a clearer list of key features. Additionally, Assistant 2's email was more personalized by mentioning the company name and offering a demo. Both emails were helpful and accurate, but Assistant 2's response was more comprehensive and tailored to the user's request.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "EcpwAYWnZD8YF8kvKNAwBy", "question_id": 75, "answer1_id": "c5rwA3cPjytSGcn7H8dZ6Q", "answer2_id": "hMXjd228Mc8pnc7zgd2frG", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. Assistant 1's response was slightly more detailed, including information about the cause of the delay, the steps taken to resolve the issue, and measures to prevent future delays. Assistant 2's response was also well-written and addressed the issue, but it lacked the specific details about the cause and resolution that Assistant 1 provided. Overall, both responses were of high quality, but Assistant 1's response was more comprehensive and informative.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "LQhfYPFfLcwFPAb5KCJzmS", "question_id": 76, "answer1_id": "XZGPtBo86KfF9REZ36s2X5", "answer2_id": "gUEd9nNepqy8nsfbypscHx", "text": "9 8\nAssistant 1 provided a well-structured script for a YouTube video, including visual cues and narration, which made it more engaging and informative. The response covered the origins of jazz, its evolution, and its cultural significance, as well as mentioning jazz festivals and clubs. Assistant 2 also provided a good response, discussing the history and cultural significance of jazz, its influence on other genres, and its role in the civil rights movement. However, Assistant 2's response was less engaging and lacked the visual cues and structure of a video script, which is why Assistant 1 received a higher score. Both responses were relevant, accurate, and detailed, but Assistant 1's response was better suited for a YouTube video script.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "M8dSLebM2csXbgXd5r59X2", "question_id": 77, "answer1_id": "DRncHCsdGji756efDhacUT", "answer2_id": "nZZyn3SAvU8T2BVR73LMHC", "text": "9 9\nBoth Assistant 1 and Assistant 2 provided engaging and well-written travel blog posts about a recent trip to Hawaii. They both highlighted cultural experiences, such as attending a luau and watching hula performances, and must-see attractions like Waikiki Beach and Haleakala National Park. The level of detail and accuracy in both responses was excellent, and both assistants effectively conveyed the excitement and beauty of a trip to Hawaii. It is difficult to choose one over the other, as both responses were equally helpful, relevant, and engaging. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "APtBcAMY9tVW7nyE4g2ZAA", "question_id": 78, "answer1_id": "Y5rCQHHDA6WNfhRcB6QboG", "answer2_id": "YM4qjUM5VfmWnzM3HXRB4G", "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided captivating movie reviews for a recently released science fiction film. Assistant 1's response was slightly more detailed and engaging, with a more in-depth discussion of the plot, characters, and special effects. The review also mentioned the character development and growth throughout the film. Assistant 2's response was also well-written and engaging, but it was slightly less detailed in comparison. Both reviews were relevant, accurate, and helpful, but Assistant 1's response had a slight edge in terms of depth and detail.", "score": [9.0, 8.5], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Z5Rhru7as3Yhk8ZXjzfPe4", "question_id": 79, "answer1_id": "Lea4wh5n6GsEkBnKsntN64", "answer2_id": "WcbzNkzu5q66QkK92NxmGB", "text": "9 8\nAssistant 1 provided a well-structured podcast script with clear sections, covering the music industry before streaming, the rise of streaming platforms, their impact on artists and the industry, and the future of music streaming. Assistant 2 also provided a good podcast script, but it was less detailed and structured compared to Assistant 1. Assistant 1's response was more comprehensive and organized, making it easier to follow and understand the flow of the podcast.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "kw3p4X9zBGtVDfPDRoJwRE", "question_id": 80, "answer1_id": "gdLxzcypTeuD6ToC6HWnXh", "answer2_id": "Tfm4f2mbpKJsa8MT6exs8D", "text": "9 9\n\nBoth Assistant 1 and Assistant 2 provided well-written and detailed symphony concert reviews that discussed the orchestra's performance and the overall audience experience. Both reviews were engaging and captured the essence of a symphony concert, making it difficult to differentiate between the two in terms of helpfulness, relevance, accuracy, and level of detail. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
diff --git a/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_llama-13b_vicuna-13b.jsonl b/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_llama-13b_vicuna-13b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..fc29a0b6b7d828b1b243efedb17b89ea02e2c602
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-13b_20230322-clean-lang/review_llama-13b_vicuna-13b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "WFp5i5yjjFethrgugKTDmX", "question_id": 1, "answer1_id": "J3UA6eGXGyFeUGqGpP3g34", "answer2_id": "cV4zXygaNP6CXEsgdHMEqz", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about improving time management skills. Assistant 1 offered a more detailed list of strategies, with 16 points, while Assistant 2 provided a slightly shorter list with 7 points. Both assistants covered similar topics, but Assistant 1 included additional suggestions such as managing stress, getting enough sleep, and striking a balance. Assistant 2's answer was well-structured and concise, but Assistant 1's response was more comprehensive, which is why Assistant 1 receives a 9 and Assistant 2 receives an 8.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "VFr7STcg4Yss7qHrgKZtJ2", "question_id": 2, "answer1_id": "jZVDNTtFnNNd6vgkGjzAGa", "answer2_id": "3zpPUeoVsPWXtKMV7cAhZ6", "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective ways to deal with stress. Assistant 1 provided a more detailed response with 10 suggestions, while Assistant 2 provided 8 suggestions. Both assistants covered similar points, such as exercise, relaxation techniques, social support, and seeking professional help. Assistant 1, however, included additional suggestions like improving time management and developing a support network, which added more value to their response. Therefore, Assistant 1 receives a 9, and Assistant 2 receives an 8.", "score": [9.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "4cn29wYPaXYEyypZC5UBFh", "question_id": 3, "answer1_id": "NbSxRXf8Yuvjr7HC6AvrPw", "answer2_id": "6xpiZJE4vxxGqjRogPfBk7", "text": "4 8\nAssistant 1's response is repetitive and contains several incorrect statements, making it less helpful and accurate. Assistant 2's response is more concise, accurate, and relevant, providing a clear comparison between Python and JavaScript, which makes it more helpful and informative.", "score": [4.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "QfbSxZhq2a3unPfDqPXWzm", "question_id": 4, "answer1_id": "E9zrPDdncYvAebEaxiPXfx", "answer2_id": "abitTVu2Dz8rdMxyThvFyJ", "text": "8 9\nBoth assistants provided helpful and relevant tips for increasing productivity while working from home. Assistant 1's response was well-organized and concise, covering important aspects such as setting up a workspace, blocking distractions, taking breaks, avoiding multitasking, having an organized schedule, and setting goals. Assistant 2's response was more detailed and covered additional points such as getting dressed for work, scheduling meetings, and staying connected with colleagues and loved ones. While both responses were accurate and useful, Assistant 2's answer was more comprehensive and provided a slightly better level of detail, which is why it received a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ZhokYAotAEkh6QQMkjJeDi", "question_id": 5, "answer1_id": "36TYpADDHAfRvtvKitfSXR", "answer2_id": "UMZod8JaWia9KB2EVXdYrF", "text": "7 9\nAssistant 1 provided a brief and simple explanation of quantum computing, mentioning the use of qubits and the advantage of quantum computers in solving certain problems more efficiently. However, the response lacked depth and details. Assistant 2, on the other hand, provided a more comprehensive explanation, covering the basics of quantum computing, the use of qubits, and the different approaches to building quantum computers. The response also mentioned potential applications of quantum computing, making it more informative and helpful overall.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RWRpNF3RZiurzyAKHRggx4", "question_id": 6, "answer1_id": "8vtLLYRM352mZ8PXXbwzBT", "answer2_id": "WXRCqF34f4UUJzd3xbgAqT", "text": "8 10\nAssistant 1 provided a brief and accurate response, but it lacked details and only mentioned the difference in amino acid profiles. Assistant 2, on the other hand, provided a more comprehensive answer, discussing nutritional value, absorption, sustainability, health effects, ethical considerations, and cost. This made Assistant 2's response more helpful and informative, earning it a higher score.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ngDhYdQ3bPkbvctvpoJoxJ", "question_id": 7, "answer1_id": "mrCniyWYvAtxWhgMdXq2BG", "answer2_id": "JREJbG5ZKXCNMoWoiJQxbC", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information on how to develop critical thinking skills. Assistant 1 offered four suggestions, while Assistant 2 provided a more detailed list of ten tips. Both answers were accurate and relevant to the question. However, Assistant 2's response was more comprehensive and provided a wider range of strategies, which is why it receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "efxZ8mmetiunsqv5AVCcKD", "question_id": 8, "answer1_id": "S8fmdbvpvbgTUyDU9V3M3N", "answer2_id": "mmVwmX6TGJ2Y72gCNac4EQ", "text": "7 9\nAssistant 1 provided a relevant and accurate response, but it was limited in detail and only focused on one major challenge, which is the lack of skilled teachers. On the other hand, Assistant 2 provided a more comprehensive and detailed answer, covering a wide range of challenges faced by the education sector today, such as access and affordability, quality and standards, technology integration, curriculum development, teacher training and development, accountability and assessment, equity and inclusion, and sustainability. This makes Assistant 2's response more helpful and informative for the user.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "PmxhqvF7E6Z6YmQBHW8SB6", "question_id": 9, "answer1_id": "KmuNjvNKRyaFwaBZTLArcG", "answer2_id": "DMTZyzd4rRAFV43xtBJ9ns", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 listed eight factors influencing consumer behavior, while Assistant 2 listed six factors. Both answers covered psychological, social, economic, and marketing factors. However, Assistant 2's response was more concise and better organized, making it easier to understand. Assistant 1's answer had some redundancy, such as mentioning reference groups and family twice in different categories. Overall, Assistant 2 provided a slightly better response due to its clarity and organization.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "9iqa3D5izqdMoruHKiihWk", "question_id": 10, "answer1_id": "HNNT9k5htiMeRwNSD6dcz4", "answer2_id": "dETAsj4xHnUCSTkZezz8aM", "text": "7 9\nAssistant 1 provided a list of conflict resolution strategies but did not elaborate on them, making the answer less detailed and informative. Assistant 2, on the other hand, provided a more comprehensive list of strategies with clear explanations, making it more helpful, relevant, and accurate. Therefore, Assistant 2 receives a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7e22Cc8mBiHN9uszBKvh8A", "question_id": 11, "answer1_id": "ChXjhDDikxU9FV3CADs6Ym", "answer2_id": "C8ksZxg3LshMUWiLxPanbt", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information regarding the implications of using single-use plastic bottles versus reusable bottles on the environment and human health. Assistant 1 focused on the environmental problems caused by single-use plastic bottles and briefly mentioned the health risks associated with the chemicals used in their manufacturing. Assistant 2 provided a more comprehensive response, covering both environmental and health impacts in greater detail, and also mentioned the benefits of using reusable bottles. Assistant 2's response was more structured and provided a clearer overview of the topic, which is why it received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "oJVZyBDNFhHuRPkUqf3qEC", "question_id": 12, "answer1_id": "5wsPnN3VmmSkahgugFNo7u", "answer2_id": "NeHhRc5P5uAU8eWSJBRkhG", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information in their responses. Assistant 1 focused on the needs of people with disabilities, mentioning accessible stops, routes, vehicles, ramps, elevators, signage, facilities, and information. However, Assistant 2 provided a more comprehensive response, addressing not only accessibility for people with disabilities but also considering seniors and those without personal vehicles. Assistant 2 also mentioned route design, scheduling, customer service, information and communication, safety, and affordability, which makes their response more detailed and well-rounded. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "dKQtFqgZw9Uk7GsD9oCpJF", "question_id": 13, "answer1_id": "NRGZGnU2sPN3ShMe9C3fMn", "answer2_id": "KAJ7UVwu8oCKyxZj9j82pm", "text": "7 9\nAssistant 1 provided a response that focused solely on fiscal policy and did not address monetary policy, which is an important aspect of the question. The response also incorrectly categorized interest rate changes as fiscal policy, when they are actually part of monetary policy. Assistant 2 provided a more comprehensive answer, covering both fiscal and monetary policies and their respective examples. Assistant 2 also mentioned the potential consequences of using these policies, which adds to the quality of the answer. Therefore, Assistant 2 receives a higher score for a more accurate and complete response.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "nMw34PnYdGMNbLEiNwJ8mb", "question_id": 14, "answer1_id": "inKimHkWsXShQBTRmxr5Yg", "answer2_id": "NnWfaeRe8PmitgmV4u5fY8", "text": "8 10\nAssistant 1 provided a relevant and accurate response, but it was quite brief and lacked specific examples. Assistant 2, on the other hand, offered a more detailed and comprehensive answer, including specific ways in which language and cultural barriers can affect communication and relationships in multicultural societies. This made Assistant 2's response more helpful and informative, earning it a higher score.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "iDS9f8GfKKavmsTwE4qMLp", "question_id": 15, "answer1_id": "H8aKtWwf8m6Lgxc2YyR2yf", "answer2_id": "WiGpqKRRwWonwgU95piNNc", "text": "7 9\nAssistant 1 provided a list of applications of AI in healthcare, but did not specifically describe a scenario as requested. The answer was relevant and accurate, but not as detailed as it could have been. Assistant 2, on the other hand, provided a more comprehensive response, describing a scenario where AI is used to analyze patient data, automate routine tasks, monitor patients remotely, develop personalized treatment plans, and analyze medical research. The answer was more detailed, relevant, and accurate, making it a better response to the question.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7QUNx3aR6g6LHSuNoEmcbn", "question_id": 16, "answer1_id": "PafVwxMsjSkYUETiVBgxTU", "answer2_id": "iangiZeex5ndxAXnWMrxBW", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the CRISPR-Cas9 gene editing technology, its potential applications, and ethical implications. Assistant 1 gave a brief overview of the process, while Assistant 2 provided a more detailed explanation, including the two repair mechanisms, HDR and NHR. Assistant 2 also discussed a wider range of potential applications and ethical concerns, making their response more comprehensive and informative. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "gTqC8kz9Kbnz2LkELTzSHx", "question_id": 17, "answer1_id": "dmDUAfTP4aERJqqSeDBybu", "answer2_id": "XnMRLphzYQX4QRNht7tbui", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about vaccinations and herd immunity. Assistant 1 gave a brief explanation of how vaccinations work and what herd immunity is, which is useful for a quick understanding. However, Assistant 2 provided a more detailed response, explaining the concept of herd immunity in greater depth, including the critical threshold needed for herd immunity to be effective. Assistant 2 also emphasized the importance of vaccinations for both individuals and the community, which adds value to the response. Overall, Assistant 2's answer was more comprehensive and informative, earning a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "bdRgXZzZVxLiweFqFnJipG", "question_id": 18, "answer1_id": "8KGSSqbLqVdSZMEN9oCv5R", "answer2_id": "HZc37bwy646mRzbqSsDAob", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the influence of social media platforms on news consumption and the spread of misinformation. Assistant 1 focused on the speed of news access, the sharing of news within communities, the incentive for sensationalist content, and the sharing of controversial news. Assistant 2, on the other hand, provided a more detailed response, discussing personalization, virality, amplification, filter bubbles, confirmation bias, and the lack of fact-checking. Assistant 2's response was more comprehensive and covered a wider range of factors, which is why it received a slightly higher score. Both assistants, however, provided valuable insights into the topic.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "AAvjdtgEkKA35V7AWttJXh", "question_id": 19, "answer1_id": "HbnJXJpPfaM2iX3ek4Epvy", "answer2_id": "iJrMatLrMdJyyqMx9uJ45a", "text": "6 9\nAssistant 1's response started by listing a large number of interventions but failed to address the main question about how cultural, social, and economic factors influence food choices. Assistant 2, on the other hand, provided a more relevant and detailed response, discussing the impact of cultural, social, and economic factors on food choices and how this knowledge can be used to promote healthier diets. Assistant 2's response was more helpful, accurate, and relevant to the question, which is why it received a higher score.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "8h5XnzGfNZSfqcnRBb5ioB", "question_id": 20, "answer1_id": "mx8Abfz5PtDcn6jgCA8zhM", "answer2_id": "oVEHqDnDTEADZSFfKgFTZd", "text": "6 9\nAssistant 1's response started off well by explaining the basic concept of natural selection but then veered off into discussing population size and genetic drift, which are not directly related to the process of natural selection. The response was not as focused and detailed as it should have been. Assistant 2 provided a much clearer and more comprehensive explanation of natural selection, detailing the steps involved in the process and how it contributes to the evolution and adaptation of species. The response was relevant, accurate, and detailed, making it more helpful overall.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "CAksX9yGXbLd2gHv3Pg97a", "question_id": 21, "answer1_id": "NuS9PUGkJG2pHscArvfyeF", "answer2_id": "WLAj4u59bj2oEXzahF79ek", "text": "7 9\nAssistant 1 provided a brief and relevant response, but it lacked the detail and formality expected in a medieval knight's introduction. Assistant 2, on the other hand, offered a more comprehensive and well-structured response, including a respectful speech that showcased the knight's humility, loyalty, and dedication to the kingdom. This response was more fitting for the context of a royal banquet and demonstrated a better understanding of the medieval knight's role and etiquette.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "SKHF8XUbwstcDNeHXoGHFk", "question_id": 22, "answer1_id": "SPjzirzbzo3UJ8BHXSgY9y", "answer2_id": "fJPnM2XcRveW2zR4DDaeTb", "text": "8 9\nAssistant 1 provided a relevant and helpful response, suggesting that the crew would be motivated by the promise of treasure and the potential to become the best pirate ship in the world. However, the answer could have been more detailed and engaging. Assistant 2, on the other hand, provided a more immersive and detailed response, with a well-crafted motivational speech that touched on the crew's sense of adventure, determination, and camaraderie. The speech was engaging and relevant to the question, and the disclaimer at the beginning was a responsible touch.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "oKLigaxgtBdQkCUrYuNwNt", "question_id": 23, "answer1_id": "JkeeC4MZVwybPU8DSbusnT", "answer2_id": "fY4Xed6veWpcKfj5krZz8m", "text": "8 10\nAssistant 1 provided a relevant and accurate explanation of a soliloquy and gave an example from Romeo and Juliet, but did not create a new soliloquy as requested. Assistant 2, on the other hand, understood the request and crafted an original soliloquy in a Shakespearean style, demonstrating creativity and relevance to the question. Both assistants were helpful and provided accurate information, but Assistant 2 went above and beyond in addressing the user's request, which is why it receives a higher score.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "emz8g7aZzAyeWf6GJmVcbQ", "question_id": 24, "answer1_id": "UcfKSXuzBPkZzLHsH4EwQz", "answer2_id": "LqFJA2JQbBXP77nkYjtrZK", "text": "4 9\nAssistant 1's response was not relevant to the question, as it focused on the AI's own origin story rather than providing an explanation of a superhero's origin story for a curious child. Assistant 2, on the other hand, provided a relevant and engaging response that explained the origin story of a superhero in a manner that a child could understand. The response was detailed, accurate, and demonstrated empathy and compassion, which are important qualities for a superhero.", "score": [4.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "CY8hhNosMQqsLCJi8PwE8J", "question_id": 25, "answer1_id": "cEGdo69FwL2Y3teYkMk9Gp", "answer2_id": "WxCKcRstJPXTXnqEzSX3Hg", "text": "8 9\nBoth assistants provided helpful, relevant, and accurate information about the technological advancements from the year 3000. Assistant 1 focused on five advancements, while Assistant 2 provided a more detailed response with six advancements. Assistant 2's response was more comprehensive, covering a wider range of technologies and providing more context for each advancement. This is why Assistant 2 received a slightly higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "idohdDHu3UbYHVem9vpHzG", "question_id": 26, "answer1_id": "W28UtVDZTcWwa8TmeruRn4", "answer2_id": "dgPJ7DvtFHD7mxtRwzM3vW", "text": "6 9\nAssistant 1's response was confusing and contained several inconsistencies, making it difficult to understand the winning play. Assistant 2, on the other hand, provided a clear and engaging description of the winning play, capturing the excitement and drama of the final moments of a championship game. Assistant 2's response was more helpful, relevant, accurate, and detailed, earning a higher score.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "BDya8NbCTo9jFshoiP5yuF", "question_id": 27, "answer1_id": "j65UyAfRdAcWBAEinXJzBK", "answer2_id": "ADX83sWvjJkewJX6JmYUzQ", "text": "7 10\nAssistant 1 provided a brief and general description of a chicken fried rice dish, but it lacked the depth and detail expected from a world-famous chef describing their signature dish. Assistant 2, on the other hand, delivered a more engaging, detailed, and eloquent response that showcased the chef's passion, creativity, and technique. The description of flavors, textures, and presentation in Assistant 2's response was more immersive and convincing, making it a better overall performance.", "score": [7.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "FmKtMUW7zJovkwFfoqbD5T", "question_id": 28, "answer1_id": "VWuywPvFPK42ZxmHpmQ27y", "answer2_id": "ihNG3rwsrt95NDhCAFeSDR", "text": "6 9\nAssistant 1's response was somewhat helpful but lacked accuracy and detail in describing the view from the summit of Mount Everest. The description of green trees and bushes is not accurate, as the summit is covered in snow and ice. Assistant 2 provided a more accurate and detailed response, describing the emotions a climber might feel and the breathtaking view from the summit. Additionally, Assistant 2 mentioned the importance of proper training, experience, and equipment, which adds value to the response.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "TeDK6wCoeXdgfQt8y4GSv4", "question_id": 29, "answer1_id": "AUWSLTAvGV5sP9qLsVyzjT", "answer2_id": "Gmhqf3z4LvVfwPNFJ89BKd", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the user's question. Assistant 1 focused more on the environmental challenges of living on Mars, such as temperature, gravity, and the thin atmosphere. They also discussed the challenges of isolation, boredom, and stress. Assistant 2 provided a more structured response, outlining a daily routine and addressing the challenges faced in each aspect of the day. Both assistants mentioned the importance of food and communication with Earth. Assistant 2's response was slightly more helpful as it provided a clearer picture of daily life on Mars and addressed the mental health aspect of living on another planet.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "4PncefRDDBxughK6btkF8X", "question_id": 30, "answer1_id": "GZtEdKLxmUUbenH4XRyLkE", "answer2_id": "gSwkKJCn6qDnNZond2xVJ3", "text": "4 9\nAssistant 1's response was not helpful or relevant to the question, as it did not provide any description of a character in a post-apocalyptic world, their survival, or the allies they encounter. Instead, it provided instructions on how to answer the question, which was not what the user asked for. Assistant 2, on the other hand, provided a detailed and relevant response that described a character's survival in a post-apocalyptic world, their alliances, and their encounters with other survivors. The response was engaging and painted a vivid picture of the character's experiences, making it a much better answer to the user's question.", "score": [4.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "XyNjnkLJfqYKP6oa98qJ3t", "question_id": 31, "answer1_id": "kba2Xpb75rUfnskZSzrBas", "answer2_id": "8RaBeMjxx2bCp2GKWv7YiP", "text": "6 9\nAssistant 1's response was less detailed and less accurate, as it only mentioned the crowd as an indicator of a restaurant's popularity among locals or tourists. Assistant 2, on the other hand, provided a more comprehensive list of factors to consider, such as crowd composition, reservations, parking, pricing, and menu offerings. Additionally, Assistant 2 explained the usefulness of this information in terms of personal preferences, time of year, recommendations, and price range. Overall, Assistant 2's response was more helpful, relevant, accurate, and detailed than Assistant 1's response.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "TsVnQN38oyCHbFPm54rGsq", "question_id": 32, "answer1_id": "RCaptsMBYXseVJgjJyyvUh", "answer2_id": "C65PZkmAfFfWRs4bPhyKqg", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 listed six subtle clues, while Assistant 2 provided six as well but with more detailed explanations. Assistant 2's answer also included more diverse clues, such as interrupting or talking over others and aggressively defending their position. Assistant 1's answer had some redundancy, such as points 3 and 6, which could have been combined. Overall, Assistant 2's response was more comprehensive and detailed, which is why it receives a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RWooMSFE499SFJvkcdhvtG", "question_id": 33, "answer1_id": "2CeidEr7Cjj6YgcuaiPgnN", "answer2_id": "4so4HTEjgDZKTqNAgkHHQX", "text": "7 9\nAssistant 1 provided a relevant answer, but it was not as detailed and comprehensive as Assistant 2's response. Assistant 2 listed several reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app, covering aspects such as preference, planning, reliability, safety, and privacy. This made Assistant 2's answer more informative and helpful compared to Assistant 1's response, which only mentioned the general idea of seeing the big picture.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "94Qr8aVXJeB5mG2o9reTVa", "question_id": 34, "answer1_id": "kpStLLNWNkGHyRFdKsmDds", "answer2_id": "FCYaiexEzdoLFPAwvTgDDm", "text": "8 10\nAssistant 1 provided a relevant answer, but it was less detailed and focused mainly on the person's actions and tone of voice. Assistant 2, on the other hand, provided a more comprehensive response, listing five specific clues to look for when determining if someone is genuinely interested in a conversation or simply being polite. This answer was more helpful, accurate, and detailed, which is why Assistant 2 receives a higher score.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "bf6Gu6BoDbhVVYH7BerRFC", "question_id": 35, "answer1_id": "PFtMQp4X4MrMYkEa8c8CKG", "answer2_id": "76EPQDh4ZNxBMGqED9LEFi", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on supporting the local community, flexibility, and eco-friendliness, while Assistant 2 provided a more detailed list of reasons, including supporting the local community, personal connection, unique products, sustainability, customer service, and prestige. Assistant 2's answer was more comprehensive and covered a wider range of reasons, which is why it received a slightly higher score. However, both answers were accurate and relevant to the question.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "jFqudZJFnj8DsYtWepyr8w", "question_id": 36, "answer1_id": "4JwjhR5QVsdYgQupSZrfUp", "answer2_id": "cvBg3gyCyDuyESof3YXhTE", "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information on assessing the credibility of a source of information. Assistant 1 focused on checking the publisher's reliability, the author's credentials and background, and the credibility of the sources cited in the article. Assistant 2 provided a more detailed and structured response, listing seven specific steps to assess credibility, including checking the author's credentials, publication's reputation, objectivity, supporting evidence, date of publication, corroboration, and maintaining skepticism. Assistant 2's response is more comprehensive and actionable, which is why it receives a higher score.", "score": [8.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "WVQdm2vKM58JfqENPtDoZV", "question_id": 37, "answer1_id": "ednPMy4dvW9CgKg2PJsBqW", "answer2_id": "kRgfUJ7qqkyZUnLd2fnnaX", "text": "7 9\nAssistant 1 provided a brief and general answer, mentioning that people enjoy being scared because it can be exciting and exhilarating, while others avoid it because it can be unpleasant and frightening. However, Assistant 2 provided a more detailed and comprehensive response, discussing various factors such as brain chemistry, life experiences, personality traits, cultural factors, and learning. Assistant 2's answer is more helpful and informative, which is why it receives a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7iXRhpSdjNTnDCokDUiQkv", "question_id": 38, "answer1_id": "ehPApSqCniyGN7hd332ToW", "answer2_id": "J3YuizKcHQ74ydNyCcwgwu", "text": "7 9\nAssistant 1 provided a relevant and helpful response, but it was quite brief and only offered a single example. Assistant 2, on the other hand, provided a more detailed and comprehensive answer, explaining how culture is learned and transmitted through social interactions, and offering examples of different cultural norms. Assistant 2 also mentioned the importance of observing reactions and responses to different situations, which adds more depth to the answer. Therefore, Assistant 2 receives a higher score for providing a more thorough and informative response.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "AqhznsybzBuLPTHNsjY8Aw", "question_id": 39, "answer1_id": "bLvdcqSwgSCrUPZ8eQLLqb", "answer2_id": "abWLpFojLpNPfDGHpuRSUG", "text": "8 10\nAssistant 1 provided a brief but relevant response, touching on some benefits of space exploration. However, the answer lacked depth and did not address the moral obligation aspect of the question. Assistant 2, on the other hand, provided a more comprehensive and balanced response, discussing both sides of the argument and offering points to consider when forming an opinion. The answer also acknowledged the complexity and nuance of the issue, making it more helpful and informative.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RyhZiSzP5iu3JZQZ6kp5GZ", "question_id": 40, "answer1_id": "8tx5GEHjSRKZRaZwHvLoM8", "answer2_id": "Ki4fkJvsoSxuQeSoj2AcBG", "text": "7 9\nAssistant 1 provided a relevant and accurate response, but it lacked the level of detail that Assistant 2 provided. Assistant 1 mentioned the importance of balancing job creation and technological progress but did not provide specific examples or suggestions on how to achieve this balance. Assistant 2, on the other hand, provided a more comprehensive answer, discussing the potential benefits of technological progress and the importance of addressing the social and economic impacts of automation. Assistant 2 also suggested policies and programs that could help strike a balance between job creation and technological progress. Overall, Assistant 2's response was more helpful and detailed, which is why it received a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "YCoUoinB2iMTNbyyURn8b6", "question_id": 41, "answer1_id": "J6TzFQAyiHs6Arijqwxtrm", "answer2_id": "GRQjYgAapNAVqzuZR4KQpf", "text": "7 8\nAssistant 1 provided a good estimate of the number of blinks in a lifetime but made an error in the calculation. The correct calculation should be 20,000 blinks/day * 29,415 days = 588,300,000 blinks (for the lower estimate) and 30,000 blinks/day * 29,415 days = 882,450,000 blinks (for the higher estimate). Assistant 2 provided a more detailed step-by-step explanation, considering factors like sleep and varying blink rates. However, Assistant 2's calculation of 4,996,800 blinks in a lifetime seems to be incorrect, as it should be 20,000 blinks/day * 29,200 days = 584,000,000 blinks (assuming 80 years of age). Both assistants provided helpful information, but Assistant 2's response was more detailed and considered more factors, which is why it received a higher score.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Xe2MUtxdQ47DQKstUXbbV7", "question_id": 42, "answer1_id": "f7xAtd3wwc4vP7WSnHffAN", "answer2_id": "SB5a2AiK224hB927vkqvYs", "text": "6 9\nAssistant 1 provided an answer with a wide range of atoms (10^22 to 10^23) without explaining the reasoning behind the numbers. Assistant 2, on the other hand, gave a detailed explanation of the calculation process, including the assumptions made, the formulas used, and the intermediate steps. Assistant 2's response was more helpful, relevant, accurate, and detailed, which is why it receives a higher score.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RdK3FHY7HRvju6zxNdX97d", "question_id": 43, "answer1_id": "eRMyp4AbfgBqtbVZzHupFN", "answer2_id": "4XYycBrMiJgYKPof2P6xoe", "text": "7 8\nAssistant 1 provided an answer with specific numbers, but the numbers given were incorrect, stating 50 million strikes per day instead of the correct estimate of 100 million. Assistant 2, on the other hand, provided a more accurate estimate of 100 million lightning strikes per day and also explained the factors that contribute to the number of lightning strikes, such as electrical charge, weather conditions, and location. Assistant 2's response was more detailed and accurate, which is why it received a higher score.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7wcEec9TX2aNeG8qC53m2V", "question_id": 44, "answer1_id": "kjKb6YG86FfAh22qCePV4V", "answer2_id": "fpoXWHHdpv7XNBTS2NRY6d", "text": "6 8\nAssistant 1's response had some errors in calculations and assumptions, which led to an incorrect conclusion. The response assumed that 12L of helium weighs 2 pounds, which is incorrect. Assistant 2's response was more accurate and detailed, explaining that lifting a house with balloons is not feasible in real life and providing a hypothetical calculation based on the movie's technology. Although the lifting power of 100 pounds per balloon was an assumption, the response was more relevant and helpful in explaining the impracticality of the scenario.", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "2uaVnAXLgg4vmPVBd64p3c", "question_id": 45, "answer1_id": "ULtkGu6m6tB4tawFStVdJM", "answer2_id": "GRXWdnzLLzmeZRU67PMTb7", "text": "7 8\nAssistant 1 provided an answer with specific numbers, but the data is from 2016, which may not be accurate for the current time. However, the response does give a general idea of the scale of text messages sent globally. Assistant 2, on the other hand, explained why it is difficult to determine the exact number of text messages sent globally in a minute and provided a more detailed explanation of the factors that make it challenging to track this information. While Assistant 2 didn't provide a specific number, the response was more informative and relevant to the question, explaining the complexity of the issue.", "score": [7.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "FuWYeqPkW7do6ws6oVKDx2", "question_id": 46, "answer1_id": "C6hkCnJZmfCGvnr5an2jFh", "answer2_id": "RTxk2jYsjYiiWMwcRjmfwu", "text": "4 8\nAssistant 1's response was confusing and contained incorrect calculations, leading to an inaccurate estimate of words spoken daily on Earth. The response also focused solely on the English language, which is not representative of the entire world population. Assistant 2, on the other hand, provided a more logical and coherent explanation, considering the world population and a range of words spoken per person per day. Although the estimate provided by Assistant 2 is still rough, it is more reasonable and relevant to the question asked.", "score": [4.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "njRKU8Jcd7KMDzhZcJ6PuG", "question_id": 47, "answer1_id": "DApBTLuiJwSsfQHTioH9PY", "answer2_id": "2J2Et6W8u2kXLTUgfYrPxe", "text": "4 7\nAssistant 1 provided an incorrect and misleading answer, stating that only 200 snowflakes fall during a typical winter, which is far from accurate. Additionally, the focus on snowflake size was not relevant to the question. Assistant 2, on the other hand, provided a more detailed and relevant response, explaining the factors that influence the number of snowflakes and the difficulty in estimating an exact number. While Assistant 2 did not provide a specific number, the explanation was more helpful, relevant, and accurate, thus receiving a higher score.", "score": [4.0, 7.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ji9DtHHwETNsxf69BH4ScL", "question_id": 48, "answer1_id": "gmy58biNVuneSpFZTcpXtT", "answer2_id": "nSGoG9Z5g5VuCXgmQJJKrq", "text": "6 8\nAssistant 1 provided an estimate of the number of published books but did not attempt to estimate the total number of pages, which was the main focus of the question. The response also mentioned that the number of books is \"peanuts\" compared to the number of books written over time, but did not provide any reasoning or evidence for this claim. Assistant 2, on the other hand, provided a step-by-step explanation of how to estimate the total number of pages in all the books ever published, using reasonable assumptions and calculations. While both assistants acknowledged the difficulty in providing an exact number, Assistant 2's response was more helpful, relevant, and detailed in addressing the user's question.", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "VTBkt2MJZEpv7imWL7Sztk", "question_id": 49, "answer1_id": "Cpi6LBB86W73APEA8naNuj", "answer2_id": "ScqJdUq9n5bp9qPv5WPqG5", "text": "5 8\nAssistant 1's response was confusing and contained unnecessary calculations, making it difficult to understand the reasoning. The final answer was also incorrect. Assistant 2 provided a clearer explanation and a more accurate estimate, taking into account the age of the Earth and the average distance between the Earth and the sun. However, the response could have been more precise by mentioning that life is estimated to have begun around 3.5 to 4 billion years ago, rather than using the age of the Earth itself.", "score": [5.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "bRdzrs2qxLr8oFKJXmrPoZ", "question_id": 50, "answer1_id": "3g83CoAFWrDD6V6LyEpoMV", "answer2_id": "GdLFr88pCwsGpfRBRQQkyh", "text": "4 8\nAssistant 1's response is inaccurate and seems to be based on arbitrary assumptions, resulting in an implausible estimate of 36,000 songs. Assistant 2, on the other hand, provides a more reasonable and detailed explanation, acknowledging the difficulty of estimating the exact number of songs recorded throughout history and considering various factors that contribute to the vastness and diversity of the music collection. Assistant 2's response is more helpful, relevant, and accurate, which is why it receives a higher score.", "score": [4.0, 8.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "3TAok7dw9s3F2aCWyp3bXc", "question_id": 51, "answer1_id": "8p5A4gouBgCmbQj5gMZapU", "answer2_id": "Sa7uNEbFGRFuXBAwau8sHS", "text": "7 9\nAssistant 1 provided a brief and somewhat relevant answer, but it lacked depth and detail. Assistant 2, on the other hand, provided a more comprehensive response, discussing the technological and cultural context of the Renaissance period and how it might have influenced the development of the Internet. Assistant 2 also considered the limitations of the time and how they would have affected the potential of the Internet. Overall, Assistant 2's answer was more helpful, relevant, accurate, and detailed than Assistant 1's response.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "RELThBxYkgYHs4LCozKhtx", "question_id": 52, "answer1_id": "LVWqSxH3Uq6675w7LXH6zP", "answer2_id": "T6Ar3cAEw3AmVVQTcCPp8N", "text": "7 9\nAssistant 1 provided a brief and somewhat speculative response, while Assistant 2 offered a more detailed and balanced answer. Assistant 2 discussed the potential outcomes for the Aztec civilization, such as maintaining their independence, resisting European diseases, and forming alliances with other indigenous groups. Additionally, Assistant 2 considered the possible impact on the spread of Christianity and the economy of Mexico. Overall, Assistant 2's response was more comprehensive and informative, which is why it received a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "LhyR6znMTKSq4wfco9D5Be", "question_id": 53, "answer1_id": "ffXksqRU3k8CNHqYfshYhj", "answer2_id": "KiZQev5JEk2h6JYeQnFmtM", "text": "7 9\nAssistant 1 provided a brief response that focused on the current state of the bacterium and how it would affect the modern world, which was not relevant to the question. Assistant 2, on the other hand, provided a more detailed and relevant response, discussing the potential demographic, economic, and social changes that could have occurred in Europe if the Black Death had not happened in the 14th century. Assistant 2's answer was more helpful, accurate, and relevant to the user's question, which is why it received a higher score.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "fLx8FwVXuCooBXpjuTsuwb", "question_id": 54, "answer1_id": "4L3dWuqvWwg5kELt7p8SVZ", "answer2_id": "cYiyYKKXM3GXkrZHAbX83S", "text": "6 9\nAssistant 1 provided a brief and speculative answer, stating that Newton wouldn't have discovered the law of gravity if he had focused on biology. While this is true, it doesn't provide any insight into the potential impact of Newton's work on biology. Assistant 2, on the other hand, gave a more detailed and thoughtful response, discussing the possible contributions Newton could have made to biology while acknowledging the uncertainty of such a scenario. Assistant 2 also provided context on Newton's work in other fields, making their answer more informative and comprehensive.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "DCg6g39EWq7TfYh7GNc6Ti", "question_id": 55, "answer1_id": "XfpqViGW5dvauNZPDSF8yv", "answer2_id": "PQmMUdAAcBsAWmWaTvdHSU", "text": "7 9\nAssistant 1 provided a brief and general answer, stating that the world would be missing the music of one of the most successful bands in history. While this is true, it doesn't provide much detail or insight into the potential impact of the Beatles not forming. Assistant 2, on the other hand, provided a more detailed and thoughtful response, exploring various possibilities of what might have happened if the Beatles had never formed. This answer was more helpful, relevant, and accurate, as it considered different aspects of the music industry, culture, and the individual members' careers.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "EQpmLs3VURbLnTVdC2DajG", "question_id": 56, "answer1_id": "WsuMf9XBGs3dFjfSGKjEzg", "answer2_id": "PorExChQ9VeYsPJptdgtsB", "text": "7 9\nAssistant 1 provided a brief and relevant answer, but it lacked detail and depth. Assistant 2, on the other hand, gave a more comprehensive response, discussing the potential consequences of not cracking the Enigma code, the possible development of alternative strategies, and acknowledging the difficulty in predicting the exact outcome of the war without Turing's contributions. This made Assistant 2's response more helpful and informative.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "ey4ZU2o9V6S8Wsoe2e5veJ", "question_id": 57, "answer1_id": "5qo3HudLrwrzEV2Px7gYRf", "answer2_id": "249f6dSMwZRZVMmtxv6yDm", "text": "7 9\nAssistant 1 provided a brief and accurate response, mentioning the longer travel time and more dangerous waters if the Suez Canal had never been constructed. However, Assistant 2 offered a more comprehensive answer, discussing the impact on international trade, economic development, European colonization, and the significance of the canal's construction in engineering and technology. Assistant 2 also touched on the political and strategic interests surrounding the canal. Therefore, Assistant 2 receives a higher score for providing a more detailed and well-rounded response.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "bggwDqWGq27n76ytXx3shS", "question_id": 58, "answer1_id": "SRxwJkNCfUaVe4Gb8LPvSK", "answer2_id": "nxa3m6kiAZwKgcMUBY8KYz", "text": "7 9\nAssistant 1 provided a brief and general response, while Assistant 2 offered a more detailed and comprehensive answer. Assistant 2 explored the potential impact of the Maya civilization on the development of other civilizations, their possible advancements in various fields, and the potential changes in the political and cultural landscape of Mesoamerica. This makes Assistant 2's response more helpful, relevant, and informative compared to Assistant 1's response.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "eYsKC4P5j99ZiJQBKSAUfo", "question_id": 59, "answer1_id": "S54Mmyim3QT5owdhwnNYmB", "answer2_id": "DXFvhjCaKqQiBsFDCkXGMT", "text": "7 9\nAssistant 1 provided a brief background on Christopher Columbus and his discovery of the Americas, but did not directly address the question of what would have happened if he had not discovered the Americas. Assistant 2, on the other hand, provided a more detailed and relevant response, discussing the potential impacts on the indigenous peoples and the possibility of other European explorers discovering the Americas eventually. Assistant 2's response was more helpful, relevant, and accurate in addressing the user's question.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "LJtZNbRQtU9PKwHPFr3isf", "question_id": 60, "answer1_id": "8DJpwEFb5jqt3vMj5oNqVX", "answer2_id": "BCUK5VcpZK26jMJ2UMgR3E", "text": "6 9\nAssistant 1's response provided some background information on Van Gogh's life, but it did not address the hypothetical scenario of him being successful during his lifetime. Assistant 2, on the other hand, directly addressed the question and provided a thoughtful analysis of how Van Gogh's life and career might have been different if he had been successful. The response was relevant, detailed, and explored various possibilities, making it more helpful and informative than Assistant 1's response.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "YJChAPeEgz5eSKT2ZKqBya", "question_id": 61, "answer1_id": "KMtqqYkDmyyThkvxXJ4kJd", "answer2_id": "JHTKBrLVTp7bv5zW2uwYVW", "text": "6 9\n\nAssistant 1:\nStrengths:\n- The answer provides a step-by-step approach to solving the problem, which can be helpful for someone who wants to understand the process.\n\nWeaknesses:\n- The answer does not provide actual code, only a list of steps.\n- The suggested use of the strstr function is not the best choice for this problem, as it can lead to incorrect results if the word is a substring of another word.\n\nSuggestions for improvement:\n- Provide actual code instead of just a list of steps.\n- Use the find function instead of strstr to search for the word in the line, and make sure to account for word boundaries.\n\nAssistant 2:\nStrengths:\n- The answer provides a complete and working code solution.\n- The code is well-structured and easy to understand.\n- The code includes helpful comments that explain the logic and functionality.\n\nWeaknesses:\n- The current implementation only counts lines containing the search string, not the actual number of occurrences of the word in the file.\n\nSuggestions for improvement:\n- Modify the code to count the actual number of occurrences of the word in the file, not just the number of lines containing the word. This can be done by using a loop to search for the word multiple times within the same line, updating the search position after each successful find.", "score": [6.0, 9.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "ixZgcMexvedv7CXnCbo3hS", "question_id": 62, "answer1_id": "MkbgwTpP5b79NWdPEnvHyq", "answer2_id": "jyzD2deb9JAffRhhkk37r5", "text": "4 7\n\nAssistant 1 provided a brief explanation of the longest common subsequence problem but did not provide any code implementation. The explanation was clear, but it would have been better to include a code example.\n\nAssistant 2 provided a code implementation for the Longest Common Subsequence (LCS) problem using dynamic programming in Python. The code is structured and includes comments explaining the logic. However, the implementation is incorrect, as it doesn't return the actual longest common subsequence but rather the length of it. The initialization of the `dp` table with `float('inf')` is also unnecessary, as it should be initialized with zeros. Here's a corrected version of the code:\n\n```python\ndef lcs(str1, str2):\n m, n = len(str1), len(str2)\n dp = [[0 for j in range(n+1)] for i in range(m+1)]\n\n for i in range(1, m+1):\n for j in range(1, n+1):\n if str1[i-1] == str2[j-1]:\n dp[i][j] = dp[i-1][j-1] + 1\n else:\n dp[i][j] = max(dp[i-1][j], dp[i][j-1])\n\n # Reconstruct the LCS\n lcs_str = \"\"\n i, j = m, n\n while i > 0 and j > 0:\n if str1[i-1] == str2[j-1]:\n lcs_str = str1[i-1] + lcs_str\n i -= 1\n j -= 1\n elif dp[i-1][j] > dp[i][j-1]:\n i -= 1\n else:\n j -= 1\n\n return lcs_str\n\n# Example usage\nstr1 = \"banana\"\nstr2 = \"apple\"\nprint(lcs(str1, str2)) # prints \"ana\"\n```\n\nThis corrected version initializes the `dp` table with zeros and includes a section to reconstruct the LCS from the `dp` table. The example usage has also been updated to match the problem statement.", "score": [4.0, 7.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "gQHT6XKZst42mzwqKYuMNf", "question_id": 63, "answer1_id": "PSb6r567shkRasPkJagvWi", "answer2_id": "jPGHEfp7FxhDyvRA6qEN3j", "text": "6 8\n\nAssistant 1:\nStrengths:\n- The regular expression provided is mostly correct and covers a good range of valid email addresses.\n- The code is simple and easy to understand.\n\nWeaknesses:\n- The code is incomplete and missing the import statement for the 're' module.\n- The code does not handle uppercase letters in the email address.\n- The regular expression does not account for some special characters that are allowed in email addresses.\n- The explanation of the two methods (re.match and re.search) is not necessary, as both can be used to achieve the same result in this case.\n\nSuggestions for improvement:\n- Import the 're' module at the beginning of the code.\n- Update the regular expression to handle uppercase letters and additional special characters.\n- Complete the code by adding the missing parts, such as the 'else' statement and the 'print' function.\n\nAssistant 2:\nStrengths:\n- The code is complete and functional.\n- The regular expression provided is more accurate and covers a wider range of valid email addresses.\n- The code is well-structured and easy to read.\n- The comments are clear and concise, explaining the logic and functionality of the code.\n\nWeaknesses:\n- The regular expression does not account for some special characters that are allowed in email addresses.\n\nSuggestions for improvement:\n- Update the regular expression to handle additional special characters that are allowed in email addresses.", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "HWHXDi6bn42H5W4yCeM78C", "question_id": 64, "answer1_id": "QWpBq9gM4iYmsTLQ3NNnBe", "answer2_id": "Ra9eTcAuNZPkPRnaHiQrak", "text": "4 6\n\nAssistant 1:\nStrengths:\n- The problem statement is well explained.\n\nWeaknesses:\n- There is no actual code provided to solve the problem.\n- The input format and test cases are not addressed.\n\nSuggestions for improvement:\n- Provide a working code solution that implements dynamic programming.\n- Address the input format and test cases in the code.\n\nAssistant 2:\nStrengths:\n- A Python code solution is provided.\n- The code is structured and includes comments.\n\nWeaknesses:\n- The code does not use dynamic programming as requested.\n- The code does not handle the input format and test cases as specified in the problem statement.\n- The code is not efficient for large values of n.\n\nSuggestions for improvement:\n- Implement dynamic programming in the code to improve efficiency.\n- Address the input format and test cases in the code.\n- Consider using an iterative approach instead of recursion to save memory and time.", "score": [4.0, 6.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "7t8xTNbmkJUJv4XkbzPCSR", "question_id": 65, "answer1_id": "U3QxoC4DxXoggnLRbgJh8b", "answer2_id": "5mbEFLEnEx8Cm2wEF8WAbM", "text": "4 9\n\nAssistant 1 provided a response that acknowledged the task but did not provide any code or solution. They only restated the problem, which is not helpful in solving the problem.\n\nAssistant 2 provided a clear and concise implementation of the binary search algorithm in Python. The code is well-structured, efficient, and easy to understand. The explanation of the algorithm is thorough, and the time complexity is mentioned. The only minor issue is that the output comment in the example is incorrect (it should be \"output: 3\" instead of \"output: 4\"). Overall, Assistant 2's response is excellent, with just a small mistake in the example output comment.\n\nFor Assistant 1, I would recommend providing an actual implementation of the algorithm instead of just restating the problem. For Assistant 2, I would recommend double-checking the example output to ensure it is accurate.", "score": [4.0, 9.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "j4M8nZhTEZ2AbYippnJwDP", "question_id": 66, "answer1_id": "f3TXCreQPJ5Erd6BGt4SFE", "answer2_id": "KLbBzjbuwq4agAESQ9Wfpq", "text": "4 8\n\nAssistant 1:\nStrengths:\n- The explanation of the approach is clear and easy to understand.\n\nWeaknesses:\n- The answer does not provide any code implementation.\n- The explanation does not mention the correct approach of using two stacks for implementing a queue.\n\nSuggestions for improvement:\n- Provide a code implementation for the given problem statement.\n- Explain the correct approach of using two stacks to implement a queue, which involves transferring elements between the stacks when needed.\n\nAssistant 2:\nStrengths:\n- The code implementation is provided.\n- The code is clear and easy to understand.\n- The code includes comments explaining the functionality of each method.\n\nWeaknesses:\n- The dequeue method is not implemented correctly. It should transfer elements from the in_stack to the out_stack when the out_stack is empty and then pop the top element from the out_stack.\n- The peek method should return the top element of the out_stack if it's not empty, otherwise, it should transfer elements from the in_stack to the out_stack and return the top element.\n\nSuggestions for improvement:\n- Correct the dequeue and peek methods to properly implement the queue using two stacks.\n- Add comments to explain the logic behind transferring elements between the stacks when needed.", "score": [4.0, 8.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "RivhKLYyBmjyVjKvc5UnKd", "question_id": 67, "answer1_id": "SG7YcT8s4sJNBRqidKk7Z7", "answer2_id": "ganAwetd9VTzEAiTtsskzX", "text": "6 8\n\nAssistant 1:\nStrengths:\n- The general approach is described well, and the steps are clear.\n\nWeaknesses:\n- The actual code implementation is missing, making it difficult to evaluate the code structure, readability, and comments.\n- The helper function is mentioned but not defined or explained.\n\nSuggestions for improvement:\n- Provide a complete code implementation, including the helper function.\n- Add comments to the code to explain the logic and functionality.\n\nAssistant 2:\nStrengths:\n- The code is provided, and it correctly implements the given problem statement.\n- The code is accurate and efficient.\n- The example usage is helpful for understanding how the function works.\n\nWeaknesses:\n- The code lacks comments to explain the logic and functionality.\n\nSuggestions for improvement:\n- Add comments to the code to explain the logic and functionality.\n- Consider edge cases and add error handling if necessary.", "score": [6.0, 8.0], "reviewer_id": "gpt-4-0328-coding", "metadata": {}}
+{"review_id": "WjYY86fiq6muEGUJ8Q4Ayp", "question_id": 68, "answer1_id": "ENsG3zoeUY7ArFo2kNaQbz", "answer2_id": "LG6szQ3cqNRzUjXtZtXG94", "text": "First, I will solve the problem independently:\n\nGiven f(x) = 5x^3 - 2x + 3, we need to find the value of f(2).\n\nf(2) = 5(2)^3 - 2(2) + 3 = 5 * 8 - 4 + 3 = 40 - 4 + 3 = 39\n\nNow, I will evaluate the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nf(2) = 26\n\nAssistant 1 provided an incorrect answer without showing any steps.\n\nAssistant 2:\nf(2) = 5(2)^3 - 2(2) + 3 = 5 * 8 - 2 + 3 = 41 - 2 + 3 = 40\n\nAssistant 2 provided a step-by-step solution but made a mistake in the calculation. The correct calculation should be:\n\nf(2) = 5(2)^3 - 2(2) + 3 = 5 * 8 - 4 + 3 = 40 - 4 + 3 = 39\n\nScores:\nAssistant 1: 1 (Incorrect answer and no steps provided)\nAssistant 2: 7 (Correct steps but made a calculation mistake)\n\nAssistant 2 performed better as they provided the correct steps but made a calculation mistake. The correct answer is f(2) = 39.\n\nOutput: (1, 7)", "score": [1, 7], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "WXsw2Xuc8fUawFxh6neKj9", "question_id": 69, "answer1_id": "C7CN5AYJhGC7iDgS7rciDQ", "answer2_id": "LHMnXGbSZubeZJcr5fgrte", "text": "First, I will solve the problem independently:\n\nGiven equation: 3x + 10 = 5(x - 2)\n\n1. Distribute the 5 on the right-hand side: 3x + 10 = 5x - 10\n2. Move the x terms to one side by subtracting 3x from both sides: 10 = 2x - 10\n3. Move the constant term to the other side by adding 10 to both sides: 20 = 2x\n4. Divide both sides by 2 to solve for x: x = 10\n\nNow, I will evaluate the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe steps provided by Assistant 1 are incorrect and confusing. The answer is also incorrect. \n\nAssistant 2:\n1. The first step is correct.\n2. The second step is incorrect. The correct step should be subtracting 3x from both sides, not subtracting 5x.\n3-7. All the subsequent steps are incorrect as they are based on the incorrect second step.\n\nScores:\nAssistant 1: 1/10\nAssistant 2: 2/10\n\nBoth assistants performed poorly, but Assistant 2 performed slightly better as the first step was correct.", "score": [1, 2], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "ZgNXGB9wziYPnGFjqLzqVH", "question_id": 70, "answer1_id": "QP9CmZKwhu2BrUmzhy8tH4", "answer2_id": "TGENVWL5iAYi4NCf5jAzKy", "text": "First, I will solve the problem independently:\n\nThe length of a line segment between two points (x1, y1) and (x2, y2) can be found using the distance formula:\n\ndistance = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nGiven the points (2, -2) and (10, 4), we can plug in the values:\n\ndistance = sqrt((10 - 2)^2 + (4 - (-2))^2)\ndistance = sqrt(8^2 + 6^2)\ndistance = sqrt(64 + 36)\ndistance = sqrt(100)\ndistance = 10\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe length of the line segment is 10 units.\n\nThis answer is correct.\n\nAssistant 2:\nThe length of the line segment is 17.973.\n\nThis answer is incorrect. The mistake is in the calculation of the differences between the x and y coordinates. The correct differences should be 8 and 6, not 18 and 4.\n\nScores:\nAssistant 1: 10\nAssistant 2: 4\n\nAssistant 1 performed better because they provided the correct answer and used the correct method to solve the problem. Assistant 2 made a mistake in the calculation of the differences between the x and y coordinates, which led to an incorrect answer.", "score": [10, 2], "reviewer_id": "gpt-4-0328-math", "metadata": {}}
+{"review_id": "YCbKCPhn4DqT56ASDfFhnz", "question_id": 71, "answer1_id": "2KtpJeRqEfbPLwdUxAXWrj", "answer2_id": "kc9oytarvqyNVVfGZz93EL", "text": "7 10\nAssistant 1 provided general tips for writing a formal email, which were helpful but did not directly address the user's request for help in writing the email. Assistant 2, on the other hand, provided a complete and well-written example of a formal email proposing a joint venture, which directly addressed the user's request. Assistant 2's response was more relevant, accurate, and detailed, which is why it receives a higher score.", "score": [7.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "oYP3NAG57ufZ4vfnvfsSty", "question_id": 72, "answer1_id": "gsL3R2grTXUiHdeDSk8f9Q", "answer2_id": "NxstpvVEnGMny3jECtvrw2", "text": "8 10\nAssistant 1 provided a helpful and relevant response, but it was not as detailed and complete as Assistant 2's response. Assistant 2 provided a full sample resignation letter that the user can use as a guide, which is more helpful and detailed than Assistant 1's response. Both assistants were accurate in addressing the user's request, but Assistant 2 provided a more comprehensive and well-structured answer.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "kQCXTX4kvYL3ujbvNFfCfm", "question_id": 73, "answer1_id": "HprPMoUk9JY5TtToRbuMwy", "answer2_id": "XN9NBpeQJiQqGT5UYkys7N", "text": "8 10\nAssistant 1 provided a brief and general outline of a recommendation letter, which is helpful but lacks the level of detail and specificity required for the user's request. Assistant 2, on the other hand, provided a comprehensive and well-structured example of a formal letter of recommendation tailored to a student applying to a prestigious graduate program in computer science. This response is more relevant, accurate, and detailed, making it the better answer for the user's needs.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "Jfo5EVsy9oemR8iJ6B5gyU", "question_id": 74, "answer1_id": "YU7gDhmo4LDVMTEZZMRdBC", "answer2_id": "3qMqAZcfHUVUTEJnMXT8Pw", "text": "0 9\n\nAssistant 1 did not provide any response, so it receives a score of 0. Assistant 2 provided a well-structured and compelling product launch announcement email, including a subject line, introduction, key features, and a call to action. The response was relevant, accurate, and detailed, earning a score of 9. The only improvement could be the addition of more specific details about the software, but this would require more information from the user.", "score": [0.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "2LY8t4Hq9j47SrN5vs4Tui", "question_id": 75, "answer1_id": "2gNqU2XpsL6yqFn6DMRgHq", "answer2_id": "hMXjd228Mc8pnc7zgd2frG", "text": "4 10\nAssistant 1 only asked for the customer's name and did not provide any actual content for the apology email, making their response less helpful and relevant. On the other hand, Assistant 2 provided a well-written and detailed apology email that addressed the customer's concerns and reassured them that the issue has been resolved. This response was helpful, relevant, accurate, and contained a good level of detail, which is why Assistant 2 receives a perfect score.", "score": [4.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "4HSThcxvTM4TjQ9a9vMbds", "question_id": 76, "answer1_id": "a4L6JNPuomkPLdWxJRQpod", "answer2_id": "gUEd9nNepqy8nsfbypscHx", "text": "7 9\nAssistant 1 provided a brief overview of jazz, its origins, and its evolution, but the response lacked the structure and detail of a script for a YouTube video. Assistant 2, on the other hand, provided a more comprehensive and well-structured script, discussing key figures, the influence of jazz on other genres, and its role in the civil rights movement. Assistant 2's response was more engaging and informative, making it a better choice for a YouTube video script.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "HfKLQTn2DvPDUdgPNoY6MG", "question_id": 77, "answer1_id": "eN9V2Wf2BNzWoJosqdFw6K", "answer2_id": "nZZyn3SAvU8T2BVR73LMHC", "text": "1 9\n\nAssistant 1 failed to provide any response to the user's request, while Assistant 2 composed an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions. Assistant 2's response was helpful, relevant, accurate, and detailed, making it deserving of a high score.", "score": [1.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "6pzkZvS4nNDwacSTqzvw7z", "question_id": 78, "answer1_id": "iCzkbWTatKfuC6QVv59XgA", "answer2_id": "YM4qjUM5VfmWnzM3HXRB4G", "text": "7 9\nAssistant 1 provided a detailed response, but the plot summary seemed to be a mix of \"Ready Player One\" and an original story, which led to confusion. The review also lacked the captivating tone that was requested. Assistant 2, on the other hand, delivered a captivating review with a clear focus on the plot, characters, and special effects. The tone was engaging and enthusiastic, making it more appealing to the reader.", "score": [7.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "7uM72Nb4Sn5eo6TrYw2bpR", "question_id": 79, "answer1_id": "GQcFyY8ayFT48DAP5NKNZf", "answer2_id": "WcbzNkzu5q66QkK92NxmGB", "text": "4 9\nAssistant 1 provided a long list of points without any structure or organization, making it difficult to use as a podcast script. Assistant 2, on the other hand, provided a well-structured podcast script with clear segments and topics, making it much more suitable for the user's request. Assistant 2's response was helpful, relevant, accurate, and detailed, while Assistant 1's response lacked organization and coherence.", "score": [4.0, 9.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
+{"review_id": "dXWWCggLzLD4SBZH2JSAZH", "question_id": 80, "answer1_id": "A6pauLMckn34otnfpeQRFi", "answer2_id": "Tfm4f2mbpKJsa8MT6exs8D", "text": "8 10\nAssistant 1 provided a brief and general overview of the concert experience, mentioning the composers and the audience's reaction. However, Assistant 2 offered a more detailed and engaging review, discussing specific pieces, the conductor's skill, and the emotions evoked by the performance. Assistant 2's response also painted a vivid picture of the concert experience, making it more helpful and informative for someone interested in a symphony concert review.", "score": [8.0, 10.0], "reviewer_id": "gpt-4-0328-generic", "metadata": {}}
diff --git a/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_alpaca-13b_vicuna-13b-20230322-new-hp-fp16.jsonl b/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_alpaca-13b_vicuna-13b-20230322-new-hp-fp16.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b61da3eacaf7e02ea34f2206d7b3703eb65dc717
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_alpaca-13b_vicuna-13b-20230322-new-hp-fp16.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "DFNs9uNfvcdTbDkXUA2YUH", "question_id": 1, "answer1_id": "kEL9ifUHDeYuAXzevje2se", "answer2_id": "FNAYdXQwcrNkTD4nCqdFGb", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about improving time management skills. Assistant 1's response was concise and covered essential points, such as setting priorities, breaking tasks into smaller chunks, and avoiding multitasking. Assistant 2's response was more detailed and organized, providing a numbered list of tips that included setting goals, using a planner, and being mindful of time-wasting activities. Assistant 2's answer also emphasized the importance of practice and patience, which adds value to the response. Therefore, Assistant 2 receives a slightly higher score due to the additional details and organization of the answer.", "score": [8.0, 9.0]}
+{"review_id": "cZiULU9XTH9inYCSQjetv2", "question_id": 2, "answer1_id": "VcF3NrWGXhhxLkDVurNrwq", "answer2_id": "7SzYV3TjzyrVkSKKGaXN4c", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective ways to deal with stress. Assistant 1 focused on identifying and avoiding stress sources, developing healthy coping mechanisms, and engaging in relaxation techniques, while also mentioning the importance of mental and physical health, rest, nutrition, and self-care. Assistant 2 provided a more detailed list of specific strategies, such as exercise, relaxation techniques, social support, sleep, healthy diet, goal-setting, time management, avoidance of unhealthy habits, and seeking professional help. Assistant 2's answer is slightly more comprehensive and organized, which is why it receives a slightly higher score. Both answers are valuable and provide useful information on managing stress.", "score": [9.0, 10.0]}
+{"review_id": "UsPpQMmb4GeEQ7mjvf3SxV", "question_id": 3, "answer1_id": "LpvtyQi9QdSgRrgGDxiGrT", "answer2_id": "cA7LoXTBCgegnTcKWpjxQq", "reviewer_id": 1, "metadata": {}, "text": "7 8\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information about the differences between Python and JavaScript. However, Assistant 2's response was more detailed and covered a wider range of differences between the two languages, such as the paradigm, community, and deployment. Assistant 1's response contained a few inaccuracies, such as stating that Python is a compiled language, while it is actually an interpreted language. Overall, Assistant 2's response was more helpful and precise, which is why it received a higher score.", "score": [7.0, 8.0]}
+{"review_id": "TxnLCDW2hK9BYYg5CTajQr", "question_id": 4, "answer1_id": "7zQm8cSTJhPtPdZdxbcfrX", "answer2_id": "8Yo5UVMPzoxjCsi3fuHNmH", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant tips for increasing productivity while working from home. Assistant 1 offered a concise list of 9 tips, while Assistant 2 provided a more detailed list of 10 tips. Both assistants covered similar points, such as establishing a routine, eliminating distractions, taking breaks, and exercising. However, Assistant 2 went a step further by including additional tips on dressing professionally, maintaining a healthy diet, and taking care of mental health. The added details and comprehensiveness of Assistant 2's response give it a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "UHvhbdgnuqz2Ntsc2Vcfge", "question_id": 5, "answer1_id": "UrLEH82RHwqqLt2LyvYSKj", "answer2_id": "cCc7EgnQNgcVAVqk96Pyto", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate explanations of the basics of quantum computing. Assistant 1 gave a brief overview of quantum computing and its potential applications, while Assistant 2 provided a more detailed explanation of the key concepts, such as superposition and entanglement, and the differences between classical and quantum computers. Assistant 2's response was slightly more informative and detailed, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "7TPasw84znHVZPvzFh5mNW", "question_id": 6, "answer1_id": "fpRdMTdnfirosQixuf2Gez", "answer2_id": "TUL5M2TWXQmM56U4Ckk4s4", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the differences between plant-based and animal-based protein sources. Assistant 1 focused on the nutritional differences, sustainability, and resource usage, while Assistant 2 provided a more comprehensive response, covering nutritional content, sustainability, health benefits, ethical considerations, taste, and allergies. Assistant 2's answer was more detailed and covered a wider range of aspects, which is why it received a higher score. However, both responses were informative and useful.", "score": [8.0, 9.0]}
+{"review_id": "hrWc2NX5JwkYVnjbNuBn4W", "question_id": 7, "answer1_id": "PvGmsCJSNFcvQKmPTnnd7s", "answer2_id": "iZUdysQf69MHQE9Lq76mii", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief overview of the process of developing critical thinking skills, mentioning questioning assumptions, taking multiple perspectives, analyzing information, and coming to logical conclusions. However, Assistant 2 provided a more detailed response, listing specific steps and strategies for improving critical thinking skills, such as practicing questioning, analyzing and evaluating, seeking diverse perspectives, active reading, learning from experts, self-reflection, and practice. The additional details and actionable steps provided by Assistant 2 make it a slightly better response.", "score": [8.0, 9.0]}
+{"review_id": "XVpMaPACSn2FCkvacjqYud", "question_id": 8, "answer1_id": "n4ANAbpR3gvLPP8poPfKZ6", "answer2_id": "D8EWgfq9yytMgQYybq3Rhq", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 provided a good overview of the major challenges faced by the education sector, but Assistant 2 went into more detail and provided a more comprehensive list of challenges, including safety, employability, globalization, and teacher shortages. This additional information makes Assistant 2's response slightly more helpful and informative, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "g6nSmutaGuEhP7bN2zxfVz", "question_id": 9, "answer1_id": "STJ36GrgQMcaUi7zaoNPit", "answer2_id": "5bzosDZ96t2p5qxeS7VAeA", "reviewer_id": 1, "metadata": {}, "text": "8 6.5\nAssistant 1 provided a more comprehensive and organized answer, covering cultural, social, and personal factors, and also mentioned advertising, marketing, and price. Assistant 2's response was less structured and incomplete, as it listed four factors but left the fifth one blank. However, Assistant 2 did mention some additional factors like demographic characteristics, economic factors, and psychological factors that were not explicitly mentioned by Assistant 1. Overall, Assistant 1's response was more helpful and relevant, while Assistant 2's response had some useful information but was less organized and incomplete.", "score": [8.0, 6.5]}
+{"review_id": "DEMoFzGQsvcUrnGfMzFxMG", "question_id": 10, "answer1_id": "425SwYvqKPAXFGTYKXB7Cs", "answer2_id": "itBobRwhSZUnsWkUs5bVwX", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant strategies for conflict resolution in the workplace. Assistant 1's response was concise and clear, covering important aspects such as open communication, active listening, and focusing on interests. Assistant 2's response was more detailed, providing additional strategies such as mediation, conflict resolution training, establishing ground rules, and documenting everything. Both responses were accurate, but Assistant 2's answer was more comprehensive, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "QU9F3iirRSV4RBiZCSqdCY", "question_id": 11, "answer1_id": "VbNAuj6KAkMdLJQXMo22oK", "answer2_id": "iVTKdmUeTySryqEHhtgmkS", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the implications of using single-use plastic bottles versus reusable bottles. Assistant 1 focused on the environmental impact and briefly mentioned human health, while Assistant 2 provided a more detailed response, covering environmental impact, human health, water quality, and cost. Assistant 2's response was more comprehensive, which is why it received a slightly higher score. However, both assistants did a good job of addressing the question and providing useful information.", "score": [8.0, 9.0]}
+{"review_id": "6UQGg4UgWtQuxfvVrDk6WD", "question_id": 12, "answer1_id": "CNGqAeu2QJbQ4QGzHJDPdq", "answer2_id": "7UgtosTTGx7bnRcQGxdDJo", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in response to the question. Assistant 2's answer was more detailed and organized, covering a wider range of factors to consider when designing an inclusive and accessible public transportation system. Assistant 1's response was also helpful and relevant, but it did not cover as many factors as Assistant 2. The organization and presentation of Assistant 2's answer made it easier to understand and follow, which contributed to the higher score.", "score": [8.0, 9.0]}
+{"review_id": "8VkkppLpxBLspZC5tLMv6B", "question_id": 13, "answer1_id": "E8w2qYqnm8iqCrSkUv62sz", "answer2_id": "Yt5tqBrrKfcumuuuxf2qqM", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided helpful, relevant, and accurate information about how governments can utilize fiscal and monetary policies to combat economic recessions. Assistant 1 gave a brief overview of fiscal and monetary policies, while Assistant 2 provided a more detailed explanation with specific examples of how these policies can be used. Assistant 2's response was more comprehensive and informative, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "8Ux7CZxMUZPNdQDdLWtKyW", "question_id": 14, "answer1_id": "8o5yMymfzo6kzmp9GK5MWr", "answer2_id": "4pZ4Uy544Bc3K59fhbW7xj", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 gave a concise answer, emphasizing the importance of learning about different cultures and being open-minded. However, Assistant 2 provided a more detailed response, discussing the specific challenges posed by language and cultural barriers, such as communication breakdowns, isolation, and perpetuation of stereotypes. Assistant 2 also offered more concrete suggestions for overcoming these barriers, such as language learning opportunities and participating in cultural events. Therefore, Assistant 2 receives a slightly higher score due to the greater level of detail and practical advice provided.", "score": [8.0, 9.0]}
+{"review_id": "U4bX8wgNsK3mTTSVgWjPhb", "question_id": 15, "answer1_id": "kbJVEEsdsSScEq5Y5furr7", "answer2_id": "762peC97upw58WFQeLNoXZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 gave a good overview of various ways AI can be used in healthcare, including diagnostics, analyzing lab results, automating administrative tasks, and facilitating communication. Assistant 2 provided a more detailed response, focusing on AI's role in diagnosis, treatment planning, predictive analytics, administrative tasks, and personalized care. Assistant 2's response was slightly more comprehensive and structured, which is why it received a higher score. However, both responses were informative and valuable.", "score": [8.0, 9.0]}
+{"review_id": "W6aoWaiDV4aPfRHJcPmoBY", "question_id": 16, "answer1_id": "CMUL5ULZuR7YC5EPzCBN2N", "answer2_id": "Yqfg2saKSNPauCS8YdsjdD", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about CRISPR-Cas9 technology, its potential applications, and ethical implications. Assistant 2, however, offered a more detailed explanation of the process and a clearer description of how the technology works, which made their response slightly more informative and comprehensive. Assistant 1's response was still valuable, but Assistant 2's answer was more thorough and precise.", "score": [8.0, 9.0]}
+{"review_id": "D9oSxvnZGyq8m3qf5KfyPb", "question_id": 17, "answer1_id": "kEmDDQyNqSkyFihYEEBpuR", "answer2_id": "gKd2BPWp7HG9PTDdgS7HT8", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about vaccinations and herd immunity. Assistant 1 gave a brief explanation of how vaccinations work and the concept of herd immunity, which is useful for a quick understanding. However, Assistant 2 provided a more detailed explanation of the vaccination process, the importance of herd immunity for individuals who cannot be vaccinated, and the factors that affect the level of herd immunity required to protect a community. This additional information makes Assistant 2's response slightly more informative and comprehensive, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "5TLL4Xpp42TTShiZgsX96j", "question_id": 18, "answer1_id": "Qs3grQsqFVGK9EVkCkf9PB", "answer2_id": "CyZSkdkGWmjh8ZgxrQ2f66", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 2, however, offered a slightly more detailed response, discussing the impact on democratic processes and the measures taken by social media platforms to address the issue. Assistant 1's response was also informative, but it did not delve as deeply into the implications and potential solutions.", "score": [8.0, 9.0]}
+{"review_id": "3SyuZeDJerAbTAjM9Rg4ET", "question_id": 19, "answer1_id": "kzZ6dKN7hkRWjqtdHr7Qns", "answer2_id": "LwA42YWwqvdkStLZEUbmm9", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 briefly touched on the cultural, social, and economic factors and mentioned tailoring health promotion strategies. Assistant 2, however, provided a more detailed response, offering specific examples of how to leverage these factors to promote healthier diets, such as celebrating cultural foods, using social media campaigns, and making healthy foods more affordable. This additional detail and practical examples make Assistant 2's response slightly more informative and useful.", "score": [8.0, 9.0]}
+{"review_id": "97uvUZkXdY5v4sDuf8wqYd", "question_id": 20, "answer1_id": "DPPDG6YGFJij2GCmRL66PU", "answer2_id": "ajfFPpHDdMiSp3PVfsUFbE", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief overview of natural selection and its role in the evolution and adaptation of species. Assistant 2 provided a more detailed response, outlining the steps involved in natural selection and explaining how it contributes to the evolution and adaptation of species. While both answers were informative, Assistant 2's response was more comprehensive and detailed, earning it a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "6F3UU9Wy2x6VYSyexoeGhn", "question_id": 21, "answer1_id": "D62FjDb4nZANzPpfSfsiyn", "answer2_id": "3n8npKQKxgHEbEmf2K6AwF", "reviewer_id": 1, "metadata": {}, "text": "7 10\nAssistant 1 provided a brief and general introduction, which was relevant but lacked the details and formality expected in a medieval knight's introduction at a royal banquet. Assistant 2, on the other hand, offered a more comprehensive and formal introduction, including specific details such as the knight's name, hometown, and the ideals they uphold. The response from Assistant 2 was more fitting for the context of a royal banquet and demonstrated a better understanding of the medieval knight's role and responsibilities.", "score": [7.0, 10.0]}
+{"review_id": "akjoBQA2LTyYorFXdpLJwD", "question_id": 22, "answer1_id": "k7E4NNw5kyj9DmvP5Pu2zb", "answer2_id": "LfeqCy9vR3kCaJiQV4Fyqf", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and enthusiastic response, but it lacked detail and substance. The response did capture the adventurous spirit of a pirate captain, but it didn't provide much in terms of motivation or addressing the crew's potential concerns. Assistant 2, on the other hand, offered a more detailed and well-rounded response. It touched upon the rewards, the challenges, and the consequences of not participating in the search for treasure. This response painted a vivid picture and provided a stronger sense of motivation for the crew.", "score": [7.0, 9.0]}
+{"review_id": "neKvoN7CfnEAA7KF9MGMVx", "question_id": 23, "answer1_id": "KFocjVCejYrU3YmLjAqoUF", "answer2_id": "JrnFfmnsuykbTkFbUnei6k", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful responses to the user's question. Assistant 1 gave a general description of how a Shakespearean character would declare their love in a soliloquy, mentioning passionate words, metaphors, and descriptions of beauty. However, Assistant 2 went a step further by providing an actual example of a soliloquy, which demonstrated a better understanding of the user's request and showcased the desired style of language. Assistant 2's response was more detailed and engaging, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "Qx5gttMEFaRSGna9fiQRV7", "question_id": 24, "answer1_id": "dq8Sm9djS7e7y9sG9vmMJf", "answer2_id": "BDBSC5P2DE3E5LuB9FX7tn", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a concise explanation of their origin story, mentioning the discovery of their powers and the support from friends and family. However, Assistant 2 provided a more detailed and engaging response, describing the process of gaining powers, learning to use them, and the impact on the city and its people. This additional detail and storytelling make Assistant 2's response slightly better than Assistant 1's.", "score": [8.0, 9.0]}
+{"review_id": "mw9CRaZYnNzLxuwWaT38gy", "question_id": 25, "answer1_id": "XZ8fG8e6u7CyKd2moK6abe", "answer2_id": "TS97KLnyki7XWYFdSdbZiY", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth assistants provided relevant and accurate information about the technological advancements from the year 3000. Assistant 1 mentioned time travel, intelligent robots, autonomous vehicles, virtual reality, augmented reality, artificial intelligence, and machine learning. Assistant 2 provided a more detailed and comprehensive list of advancements, including virtual reality, artificial intelligence, space travel, food and water production, transportation, medicine, energy, education, and climate change. Assistant 2's response was more helpful and informative due to the level of detail and the variety of advancements discussed, which is why it received a higher score.", "score": [8.0, 10.0]}
+{"review_id": "UVxVTc7BVtNZgxfCFaNNKY", "question_id": 26, "answer1_id": "oKaXHfoK4pXwrefFWXmeA8", "answer2_id": "9ATk4tAxBoe267rQUDrgYD", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and accurate description of a winning play in a basketball game, but the response lacked the excitement and energy expected from a sports commentator. Assistant 2, on the other hand, captured the excitement and energy of a sports commentator while describing a winning play in a football game. The response was detailed, engaging, and painted a vivid picture of the final moments of the championship game. Therefore, Assistant 2 receives a higher score for better overall performance.", "score": [7.0, 9.0]}
+{"review_id": "47rHLSkmkYtjz5MW5XBjwW", "question_id": 27, "answer1_id": "ZwiZfvDWm7SETKNBfDk7Mb", "answer2_id": "C4zL3pUsGj2Z8U7Tm2nzDN", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided relevant and helpful responses to the user's question. Assistant 1 gave a general description of their signature dish, mentioning the blend of traditional and modern cooking techniques, the use of fresh ingredients, and the balance of flavors and textures. However, Assistant 1's response lacked specific details about the dish itself.\n\nAssistant 2, on the other hand, provided a more detailed and engaging description of their signature dish. They mentioned the specific ingredients, cooking techniques, and flavors involved in the dish, such as the braised short rib, risotto, and the garnishes. Assistant 2 also conveyed their passion for cooking and the effort they put into perfecting the dish. This response was more informative and captivating, which is why Assistant 2 receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "aAsDye6PZdAPY7EaL8Lq4L", "question_id": 28, "answer1_id": "DxYopRe2LcTJMy3FWu6btd", "answer2_id": "CTi8ZRuHoAzRQjMRAU3mBN", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate responses to the user's question. Assistant 1 gave a concise response that captured the emotions and the view from the summit of Mount Everest. However, Assistant 2 provided a more detailed response, describing the emotions and the view more vividly, and also mentioning the challenging descent. While both responses were helpful, Assistant 2's answer was more comprehensive and engaging, which is why it receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "WxhoGWejkCeWXabpmt5xM4", "question_id": 29, "answer1_id": "WC3UJVh4jQ5RUkpcRMU98L", "answer2_id": "8chhW8gPzM6zHUN6VAqpym", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and accurate response, but it was less detailed compared to Assistant 2. Assistant 2's response was more comprehensive, discussing the challenges of the Martian environment, life support systems, and the importance of teamwork. Additionally, Assistant 2 mentioned the potential impact on Martian microbial life, which added depth to the answer. Overall, Assistant 2 provided a more informative and well-rounded response.", "score": [7.0, 9.0]}
+{"review_id": "RUpQtFwWVGeyo2z38mpaRR", "question_id": 30, "answer1_id": "gTvgn6ksDjGGgdprw6AG5A", "answer2_id": "Pxj7ZVsvQ9HMQnRVMh7os4", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful responses to the user's question. Assistant 1's response was concise and focused on the alliance formed with other survivors, while Assistant 2's response was more detailed, providing a backstory for the character and mentioning specific allies. Assistant 2's response was more immersive and engaging, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "MDr2gdJsFzFgWcvHtaseaG", "question_id": 31, "answer1_id": "3q7giCk2BA3Ye4Tm9HC2iw", "answer2_id": "NCUhSMqzESRJJVDKHQ7XPB", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a decent response, but there was a mistake in the last sentence, stating that higher prices could indicate popularity among locals, which is not necessarily true. Assistant 2, on the other hand, provided a more detailed and accurate response, covering various indicators to determine the restaurant's popularity among locals or tourists. Additionally, Assistant 2 explained the reasons why this information might be useful, which was more comprehensive and relevant to the question.", "score": [7.0, 9.0]}
+{"review_id": "kxKeezmuwVDPUsTGoMyTLS", "question_id": 32, "answer1_id": "hRGsxy86v26SC4yAQS29X4", "answer2_id": "mEvCEzVGnENJAMzs2ioTmp", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1 listed some subtle clues, but Assistant 2 went into more detail and provided a more comprehensive list of clues, which makes it slightly better. Both assistants addressed the topic accurately, but Assistant 2's response was more thorough and informative.", "score": [8.0, 9.0]}
+{"review_id": "KCbupK7GA3vvH33HDSQj2Z", "question_id": 33, "answer1_id": "3n49A5ggJERfXYrLns3ZeU", "answer2_id": "CoFnjLgW5Eens9JhnpiJHZ", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. However, Assistant 2's response was more detailed and comprehensive, covering a wider range of reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app. Assistant 1's response was still useful, but it did not provide as much information as Assistant 2's answer.", "score": [8.0, 10.0]}
+{"review_id": "ez2MzvoTqvBsx7ksNLP43n", "question_id": 34, "answer1_id": "ErCpFtPuYVru4oTTk4WrxG", "answer2_id": "aQ9mM2wfmHSge2KvuQ8hmx", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused mainly on body language and facial expressions, while also mentioning the importance of asking questions and providing relevant answers. Assistant 2, on the other hand, provided a more detailed response, listing five different ways to determine if a person is genuinely interested in a conversation or simply being polite. Assistant 2's answer covered a wider range of factors, including nonverbal cues, responses, participation, tone of voice, and directly asking the person. This additional detail and comprehensiveness give Assistant 2 a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "jiLMSVKFFDTgXSvJT7qA4v", "question_id": 35, "answer1_id": "PTNoCRMZWoJk8HaKX7fW45", "answer2_id": "eM5S86H3bXTGLQcVW59XsD", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a concise response that touched on the main reasons someone might prefer to shop at a small, locally-owned business. However, Assistant 2 provided a more detailed and organized response, listing specific reasons and elaborating on each point. This made Assistant 2's answer more comprehensive and informative, earning it a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "QYaEYfZDTKBnDx67Jp8rDd", "question_id": 36, "answer1_id": "n8cFs9KENNwZ4z3SR4iXTr", "answer2_id": "MpBrYa9J2zQy9NGi2dvKp8", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 offered a brief overview of assessing credibility, mentioning the author's credentials, source of information, evidence, and reviews or comments. However, Assistant 2 provided a more detailed and structured response, covering additional aspects such as checking the date, language, source bias, URL, and peer review. Assistant 2 also emphasized the importance of being critical and skeptical when evaluating information. While both answers were accurate and relevant, Assistant 2's response was more comprehensive and detailed, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "RActNFicmiDbyV8dShmL7q", "question_id": 37, "answer1_id": "GzxL9mmEK5RzKqRbqBMUVC", "answer2_id": "i8QZzVJo2WkTLc7WMv4bNm", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief and clear explanation of why some people enjoy being scared and why others avoid it, mentioning excitement, emotional state, and personal preferences. However, Assistant 2 provided a more detailed response, discussing the adrenaline rush, coping mechanisms, fight or flight response, stress hormones, and the role of genetics, personality traits, and life experiences. This additional information makes Assistant 2's answer more comprehensive and informative, earning it a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "azGU4pbQfFesB3hJ4Rvbnb", "question_id": 38, "answer1_id": "QpoHFgb9SzwuaXQQUuBUQD", "answer2_id": "HP2CYZ3HJWMcGp6QF9qbq6", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the general idea of observing interactions, addressing, handling disagreements, and nonverbal cues. Assistant 2, however, went into more detail by providing specific examples of cultural aspects that can be observed, such as nonverbal communication, social distance, greetings, interaction patterns, and values and beliefs. This additional detail makes Assistant 2's response slightly more informative and useful for someone trying to understand how to observe cultural norms and expectations in social situations.", "score": [8.0, 9.0]}
+{"review_id": "eQbbzNXSDVzN8XCa29wcx9", "question_id": 39, "answer1_id": "Fxe6MS4GpP3LMDUwzY2cPA", "answer2_id": "XrMYXEWHff6jXjjJWquXfW", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question, acknowledging that it is a matter of personal opinion and priorities. Assistant 1 presented the two sides of the argument, but Assistant 2 went a step further by suggesting that both goals can be pursued simultaneously and explaining the benefits of each. Assistant 2 also emphasized the importance of balancing priorities and resources, which adds more depth to the response. Therefore, Assistant 2 receives a slightly higher score for providing a more detailed and comprehensive answer.", "score": [8.0, 9.0]}
+{"review_id": "PjJ898Xk9wtZZxaQkrzVz7", "question_id": 40, "answer1_id": "mJiQ2FGR4Xb8kmhZjharkw", "answer2_id": "eeS6qNLeX6AScyGrS5pwrc", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 emphasized the importance of creating policies and initiatives that promote both job creation and technological progress, mentioning the need for investing in training and education. Assistant 2 provided a more detailed response, discussing the importance of both job creation and technological progress, and elaborating on how to achieve a balance between the two. Assistant 2 also addressed the need for social equity in the context of automation. While both responses were helpful, Assistant 2's answer was more comprehensive and detailed, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "nYvtSBsJgkwkTzr4dm49V4", "question_id": 41, "answer1_id": "6Kph4RHRKEZ4YUoaHuEhBv", "answer2_id": "an76qPuSJ4TQDWvNXdcJ2w", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more precise answer by stating that the average human blinks roughly 50 million times in a lifetime, while Assistant 2 mentioned that it's difficult to determine an exact number. Both assistants explained the importance and purpose of blinking, but Assistant 1's response was more detailed and informative. Assistant 2's response, while still relevant and accurate, was less specific and focused more on the factors that influence blinking frequency. Overall, both responses were helpful and relevant, but Assistant 1's answer was more detailed and precise.", "score": [8.0, 7.0]}
+{"review_id": "H7cCbW9r8sxJC6gTVV89cu", "question_id": 42, "answer1_id": "WBwpBQwhxn5kxLDb7MschC", "answer2_id": "DYnmt5xTTTkLuyyBxhmDsq", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's response contains incorrect information and lacks a clear step-by-step explanation. The atomic weight of a single atom of salt is not 58.943 g/atom, and the final answer of 102.98 atoms is incorrect. Assistant 2's response is more helpful, relevant, and accurate, providing a step-by-step explanation and an estimate of the number of atoms in a grain of salt. The level of detail in Assistant 2's response is also more appropriate, making it easier for the reader to understand the reasoning behind the calculations.", "score": [4.0, 8.0]}
+{"review_id": "DHugR9EbZbDKrrVsGsbWsa", "question_id": 43, "answer1_id": "kf8nahQVci2ZLaYikagB7U", "answer2_id": "MbcVvnW3Eh9wZi34SzYawn", "reviewer_id": 1, "metadata": {}, "text": "4 7\nAssistant 1 provided an incorrect number of lightning strikes per day, stating that there are approximately 2000 lightning strikes. The explanation provided was also quite basic and lacked detail. Assistant 2, on the other hand, provided a more detailed and accurate response, discussing the factors that influence lightning activity and the complexity of estimating the number of strikes per day. However, Assistant 2 did not provide a specific number of lightning strikes per day, which was part of the original question. Despite this, Assistant 2's response was more helpful, relevant, and accurate overall.", "score": [4.0, 7.0]}
+{"review_id": "bEBxYa6HHrr3aEczQdZ8nw", "question_id": 44, "answer1_id": "Gptgryd4o2dC8V5aqRmeJJ", "answer2_id": "4oSKRo3DtcNtE8gRbrSJPg", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided an answer with a specific number of balloons needed to lift a house, but the explanation was not clear and contained errors in the calculations. The answer seemed to be based on an incorrect assumption about the weight of the house (264.72 lbs) and the amount of helium needed to lift 500 pounds (89.1 cubic feet). This led to an incorrect conclusion.\n\nAssistant 2, on the other hand, provided a more detailed and accurate explanation, taking into account the principles of buoyancy, volume, and the weight of a typical house. The estimate of 100 million balloons was based on a more reasonable assumption of the house's weight (100 tons) and the volume of each balloon (1 cubic meter). The answer also acknowledged the limitations and impracticality of attempting such a feat in reality. Overall, Assistant 2's response was more helpful, relevant, and accurate, with a better level of detail.", "score": [7.0, 9.0]}
+{"review_id": "JzX2APR47JmwQyZBmASxjE", "question_id": 45, "answer1_id": "RfBWW8ZhdfTuTMb454Un4o", "answer2_id": "bhh9SFsYgquUPeC2qBKMJw", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided an outdated study from 2017 and gave an incorrect number of text messages sent per minute. The explanation was not step-by-step and did not provide a clear reasoning for the answer. Assistant 2, on the other hand, provided a more recent knowledge cutoff date and a step-by-step explanation for estimating the number of text messages sent per minute. Assistant 2 also acknowledged the fluctuating nature of the number and provided relevant context and data sources. Therefore, Assistant 2's response was more helpful, relevant, accurate, and detailed.", "score": [7.0, 9.0]}
+{"review_id": "Ta7DyFYBxYXqvetEXivtoJ", "question_id": 46, "answer1_id": "neGgLYm47JvqN8qkw8VeoW", "answer2_id": "oKMFEsBBp4SEJiVPRQZN2N", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1 provided a rough estimate of 1 billion words spoken daily on Earth, but the explanation lacked details and reasoning behind the number. The response focused more on the importance of language rather than providing a step-by-step explanation of the estimate. Assistant 2, on the other hand, provided a more detailed explanation, considering factors such as languages, speech per person, and world population. Although Assistant 2 did not provide an exact number, the response acknowledged the difficulty in estimating the exact number and provided a more reasonable estimate in the trillions. Additionally, Assistant 2 mentioned the exclusion of written communication, which adds context to the estimate. Therefore, Assistant 2 receives a higher score for providing a more detailed and well-reasoned response.", "score": [6.0, 8.0]}
+{"review_id": "FPLGd9p44nTMckbQWhAj4a", "question_id": 47, "answer1_id": "KSePbzLwsYasR3aui4HU8h", "answer2_id": "XXXVQo5w5XCFiEuK6n3hUV", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a clear explanation of how snowflakes form and the factors that affect their shape, size, and number. However, the answer did not provide a step-by-step reasoning for the number of snowflakes falling during a typical winter. Assistant 2, on the other hand, provided a more detailed explanation of the factors affecting snowflake formation and an estimate of the number of snowflakes falling per square meter during a typical winter day. Although both answers acknowledged the difficulty in providing an exact number, Assistant 2's response was more detailed and relevant to the question.", "score": [7.0, 8.0]}
+{"review_id": "bkiWLjzueaREbVweRHq4Bi", "question_id": 48, "answer1_id": "SWWfLpP2gnWHLbYRZXNsW3", "answer2_id": "Ysbx8mFxYva6TbHQGKL8K7", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and structured response, estimating the total number of pages to be around 100 billion based on the number of unique titles and average page count per book. The response also touched on the importance of books and the transition to digital formats. Assistant 2, on the other hand, focused more on the difficulty of providing an exact number and gave a rough estimate of 1 billion pages, which is significantly lower than Assistant 1's estimate. Assistant 2's response also did not account for the number of unique titles, which makes their estimate less accurate. Overall, Assistant 1's response was more helpful, relevant, and detailed, while Assistant 2's response was less accurate and less detailed.", "score": [8.0, 6.0]}
+{"review_id": "gTGj3aSQg7KN3Ak4A6bDC4", "question_id": 49, "answer1_id": "WaBwKYhs7eAG22qCGLH2j3", "answer2_id": "KenbYuKVVUVXe2stVtxkeF", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a clear and concise answer, but the calculation was incorrect, as they multiplied the number of years by the length of a year, which doesn't make sense. Assistant 2 provided a more detailed explanation and a better estimate, but the answer could have been more concise. Both assistants provided relevant information, but Assistant 2's response was more accurate and detailed, which is why it received a higher score.", "score": [7.0, 8.0]}
+{"review_id": "75xrrTTvHGe53BpVZesAhr", "question_id": 50, "answer1_id": "MfMJeE9om7qyBbqopHouf4", "answer2_id": "ceWTK9jYWZq2Dd2H7KGkWY", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and general answer, mentioning that the number of songs recorded throughout history is difficult to quantify and giving a conservative estimate of several hundred thousand. However, this estimate seems too low, and the answer lacks detailed reasoning. Assistant 2, on the other hand, provided a more detailed and well-reasoned response, considering factors such as the number of songs created and the development of recording technology. Assistant 2 also acknowledged the difficulty in providing an exact number but offered a more plausible estimate of potentially billions of recorded songs. Additionally, Assistant 2 emphasized the importance of music in human culture and society, which added depth to the answer.", "score": [7.0, 9.0]}
+{"review_id": "mt29YmuAyGqJXcHR4AP4xX", "question_id": 51, "answer1_id": "TjWPRDM6JFpPF8xeRptCKb", "answer2_id": "6Bbzs6YWyzPj52rZAfRPTt", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief response that touched on the limited access to the Internet during the Renaissance period, but it lacked depth and detail. Assistant 2, on the other hand, offered a more comprehensive and speculative analysis of how the Internet might have been invented and its potential impact on society during that time. Assistant 2's response considered various aspects such as localization, speed of communication, and the influence of art, literature, and science on the development of the Internet. This made Assistant 2's response more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "FRviLDru3mrDChRNUTzcaL", "question_id": 52, "answer1_id": "iR2tYTsWTFENEP7Qy9RgtX", "answer2_id": "EG6h5jgpQ8wesKAX43nt9X", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the potential preservation of Aztec culture, language, and influence in the region, while Assistant 2 provided a more detailed historical context and listed several possible scenarios that could have occurred if the Aztecs had repelled the Spanish conquistadors. Assistant 2's answer was slightly more comprehensive and detailed, which is why it received a higher score. However, both answers were informative and addressed the hypothetical nature of the question.", "score": [8.0, 9.0]}
+{"review_id": "ecqHSM9uoYijnMK7jPSg54", "question_id": 53, "answer1_id": "AZdS8xAi3GwAmCqkNSnnwv", "answer2_id": "J76ZgE27N2Pe77NcEBrWUV", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and somewhat detailed response, but it was less comprehensive than Assistant 2's response. Assistant 1 focused mainly on the potential impact on urbanization, economic growth, and the timing of the Renaissance and scientific revolution. Assistant 2, on the other hand, provided a more detailed and well-organized response, covering the potential consequences in five different areas: demographic, economic, political, social, and medical. This made Assistant 2's response more informative and helpful overall.", "score": [7.0, 9.0]}
+{"review_id": "YMonyFkpcW3m2jwZcpztxg", "question_id": 54, "answer1_id": "VmwifF2JD5osYKDTqv2ZRS", "answer2_id": "hoFFABFFF7J8juv3AnoVWZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1 gave a general overview of the possible contributions Newton could have made in the field of biology, focusing on human body functions and animal behavior. However, Assistant 2 provided a more detailed response, discussing specific areas where Newton could have made significant contributions, such as plant biology, animal behavior, evolution, and medicine. Assistant 2 also mentioned Newton's interests and experiments in these areas, which adds credibility to the answer. While both responses were accurate and relevant, Assistant 2's answer was more comprehensive and detailed, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "cxAbCMLkgJvSekf92WoncW", "question_id": 55, "answer1_id": "mUL5UPj3qDGaCriEjL2U3B", "answer2_id": "8R5n2uZ4oq52QkWQsSS6No", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and accurate response, but it was quite brief and lacked specific details about the potential impact of the Beatles not forming as a band. Assistant 2, on the other hand, provided a more detailed and comprehensive answer, listing several possible scenarios and discussing the potential consequences of the Beatles not existing. This made Assistant 2's response more helpful and informative, earning it a higher score.", "score": [7.0, 9.0]}
+{"review_id": "7zrfwesi9MMLguwfypMSde", "question_id": 56, "answer1_id": "dVdwUoVrAQJDuWxiodykiw", "answer2_id": "ECtu3QVXVrNhssSyktKzkq", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1 gave a brief response, mentioning the critical advantage gained by the Allies due to the cracking of the Enigma code, which helped them win the war. Assistant 2, however, provided a more detailed response, discussing the importance of the Enigma code in German military communications, the potential impact on key battles and campaigns, and the fact that codebreaking was just one aspect of the Allies' efforts during the war. Assistant 2's response was more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "eYpcy9Gk3nhVyvyiVakjhm", "question_id": 57, "answer1_id": "EiNn9jjfy7dga6xfCtLtF8", "answer2_id": "jb8ot3ucdF3RvzApEYKft6", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 briefly explained the importance of the Suez Canal for international trade and navigation, and how its absence would have made trade more difficult and expensive. Assistant 2, however, provided a more detailed response, discussing the consequences for international trade, the impact on the global economy, and the role of the Suez Canal in military operations and geopolitics. This additional information and context make Assistant 2's response slightly more comprehensive and informative, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "mFfNhvff8L9cgoTSGMeJ7t", "question_id": 58, "answer1_id": "eqG9f2R9hXVyZrZMpcqAYq", "answer2_id": "cAjR8aDWRKVKbxKeqRRgvW", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information about the potential outcomes if the Maya civilization had not mysteriously collapsed. Assistant 1 focused on the continuation of the civilization's growth, including the development of cities, monuments, writing, astronomy, and trade networks. Assistant 2 provided a more detailed response, outlining five possible scenarios that could have occurred, touching on aspects such as political stability, cultural influence, and environmental impact. While both responses were helpful, Assistant 2's answer was more comprehensive and offered a wider range of potential outcomes, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "DFDrfgAMt5TgCcXYG6A3u2", "question_id": 59, "answer1_id": "ex42CLEzDVC2TxAvMCtYQJ", "answer2_id": "7hKzFRQWWPVbr8H4wcbGdA", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and accurate response, but it was not as detailed as Assistant 2's response. Assistant 2 went into more depth about the indigenous civilizations, the consequences of Columbus's arrival, and the potential for these societies to continue thriving without European colonization. Both answers were helpful, but Assistant 2 provided a more comprehensive and informative response.", "score": [7.0, 9.0]}
+{"review_id": "NcqFgb4pCKj4XJ6EBraU4j", "question_id": 60, "answer1_id": "RfFANSwHeCm3Pqe2AJ86Dk", "answer2_id": "3vMtYzSrV5ARpYA9fGUe4H", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 gave a brief overview of the possible outcomes if van Gogh had been successful during his lifetime, such as achieving recognition, financial stability, and expanding his portfolio. Assistant 2 provided a more detailed response, listing specific outcomes and elaborating on how these outcomes could have influenced van Gogh's life and career. While both responses were informative, Assistant 2's response was more comprehensive and detailed, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "Pu5MbGgNFys2dxrmPdgjoa", "question_id": 61, "answer1_id": "nzxdtWmgQhRkHeVAanVogZ", "answer2_id": "nvbTnFLEyPSSgPdf2n6Mqp", "reviewer_id": 2, "metadata": {}, "text": "4 6\n\nAssistant 1:\nStrengths:\n- The description of the program is clear and concise.\n\nWeaknesses:\n- The assistant did not provide any code, only a general description of the program.\n\nSuggestions for improvement:\n- Provide a complete code implementation that solves the given problem statement.\n\nAssistant 2:\nStrengths:\n- The assistant provided a code implementation.\n- The code is structured and includes comments explaining the logic.\n\nWeaknesses:\n- The code does not actually count the occurrences of a specific word in the file.\n- The code uses C-style file handling instead of C++ file handling (e.g., ifstream).\n- The code does not handle long lines properly (limited to 100 characters).\n\nSuggestions for improvement:\n- Implement the functionality to count the occurrences of a specific word in the file.\n- Use C++ file handling (e.g., ifstream) instead of C-style file handling (e.g., fopen).\n- Handle lines of any length by using std::string and getline() function.\n- Include error handling for file opening and reading.", "score": [4.0, 6.0]}
+{"review_id": "jxrw73PKwFQj72Tbt9A8Xq", "question_id": 62, "answer1_id": "kRo6z5qmeiSM8Xz6BPJHLC", "answer2_id": "RS2roGuiWf5jZfncgfHPeG", "reviewer_id": 2, "metadata": {}, "text": "4 2\n\nAssistant 1:\nStrengths:\n- The code is structured and has comments explaining the purpose of the function and its arguments.\n- The code initializes the matrix and calculates the longest common subsequence using dynamic programming.\n\nWeaknesses:\n- The code has an incorrect implementation of the dynamic programming algorithm for finding the longest common subsequence.\n- The code initializes the first row with 0 twice, which is redundant.\n- The code does not return the correct result for the given problem statement.\n\nSuggestions for improvement:\n- Correct the dynamic programming algorithm to find the longest common subsequence.\n- Remove the redundant initialization of the first row with 0.\n- Test the code with example inputs to ensure it returns the correct result.\n\nAssistant 2:\nStrengths:\n- The code is structured and has comments explaining the purpose of the function and its arguments.\n\nWeaknesses:\n- The code has an incorrect implementation of the dynamic programming algorithm for finding the longest common subsequence.\n- The code initializes the first row and column of the matrix with 1 instead of 0, which is incorrect.\n- The code does not return the correct result for the given problem statement.\n\nSuggestions for improvement:\n- Correct the dynamic programming algorithm to find the longest common subsequence.\n- Initialize the first row and column of the matrix with 0 instead of 1.\n- Test the code with example inputs to ensure it returns the correct result.", "score": [4.0, 2.0]}
+{"review_id": "454XdWFuqK3wcKK2rZL2NA", "question_id": 63, "answer1_id": "7kj8odEE79UBdR4pT6u6TY", "answer2_id": "Rz8sSwJzffBjUhjEYAyEWS", "reviewer_id": 2, "metadata": {}, "text": "6 8\n\nAssistant 1:\nStrengths:\n- Provided a regular expression for email validation.\n\nWeaknesses:\n- Did not provide a complete Python implementation.\n- No comments or explanation of the regular expression components.\n\nSuggestions for improvement:\n- Provide a complete Python implementation using the provided regular expression.\n- Add comments to explain the different components of the regular expression.\n\nAssistant 2:\nStrengths:\n- Provided a complete Python implementation for email validation.\n- Used a regular expression to validate the email address.\n- Included comments to explain the regular expression components.\n\nWeaknesses:\n- There is a typo in the variable name \"Email Addition\" which should be \"email_pattern\".\n- The regular expression provided is slightly different from Assistant 1's and does not account for some valid email addresses (e.g., with a hyphen in the domain name).\n\nSuggestions for improvement:\n- Correct the variable name to \"email_pattern\".\n- Use the regular expression provided by Assistant 1 to account for a wider range of valid email addresses.\n- Consider using `re.fullmatch()` instead of `re.match()` to ensure the entire input string is an email address.", "score": [6.0, 8.0]}
+{"review_id": "LhkkQ4ZJEyJ5Pe88MDgZrr", "question_id": 64, "answer1_id": "UULZ4GLz9j9ot7tN7x3ySD", "answer2_id": "SQCnPwtYgHH57ikbXgUpQq", "reviewer_id": 2, "metadata": {}, "text": "4 9\n\nAssistant 1:\nStrengths:\n- The explanation of the program is clear and easy to understand.\n\nWeaknesses:\n- The code provided is not in any programming language and is more like pseudocode.\n- The logic provided does not actually solve the problem of finding the nth Fibonacci number using dynamic programming.\n- No comments or explanations are provided within the code itself.\n\nSuggestions for improvement:\n- Provide a working code example in a specific programming language.\n- Use proper dynamic programming techniques, such as memoization or tabulation, to solve the problem.\n- Include comments within the code to explain the logic and functionality.\n\nAssistant 2:\nStrengths:\n- The code provided is in Python and is a working solution to the problem.\n- The code uses memoization, a dynamic programming technique, to efficiently solve the problem.\n- The explanation of the code is clear and concise.\n\nWeaknesses:\n- The code has a minor issue with the nested function having the same name as the outer function, which can be confusing and should be avoided.\n- The code could benefit from more comments to explain the logic and functionality.\n\nSuggestions for improvement:\n- Rename the inner function to avoid confusion with the outer function.\n- Add more comments within the code to explain the logic and functionality.", "score": [4.0, 9.0]}
+{"review_id": "gEGhWcVHk99wpcXJqpdtQS", "question_id": 65, "answer1_id": "Rh8P3bxSJx4p6nCZkSZBrh", "answer2_id": "mfGSLxAAjjBmUFtCx3STEN", "reviewer_id": 2, "metadata": {}, "text": "7 10\n\nAssistant 1 provided a good explanation of how a binary search algorithm works but did not provide any code implementation. The explanation was clear and concise, but it would have been more helpful to include an example implementation.\n\nAssistant 2 provided a clear and concise explanation of the binary search algorithm, along with a step-by-step implementation guide. The Python code provided is accurate, efficient, and easy to understand. The example usage demonstrates how the function works and helps the reader understand the algorithm's functionality. Assistant 2's submission is excellent and adheres to proper coding standards and best practices.\n\nFor Assistant 1, I would recommend providing a code implementation to support the explanation of the algorithm. This would make the submission more complete and helpful for the user.\n\nFor Assistant 2, the submission is already excellent. One minor suggestion would be to add a few comments within the code to explain the logic and functionality of each section. This would make the code even more accessible to readers who may be less familiar with programming concepts.", "score": [7.0, 10.0]}
+{"review_id": "oMsic9yeBr7wV3fu7VVV9J", "question_id": 66, "answer1_id": "R3aYUUtY46ht4ToU8US96x", "answer2_id": "CcUgvJProVma9sNKW2nKHc", "reviewer_id": 2, "metadata": {}, "text": "6 8\n\nAssistant 1 provided a general explanation of how to implement a queue using two stacks but did not provide any actual code. The explanation was clear, but it would have been more helpful to include a code example.\n\nAssistant 2 provided a code implementation of a queue using two stacks. However, the code has some issues. The `in_queue` and `out_queue` variables are not used correctly, and the `dequeue` method does not transfer elements from the `in_stack` to the `out_stack`. Here's an improved version of the code:\n\n```python\nclass Queue:\n def __init__(self):\n self.in_stack = []\n self.out_stack = []\n\n def enqueue(self, element):\n self.in_stack.append(element)\n\n def dequeue(self):\n if not self.out_stack:\n while self.in_stack:\n self.out_stack.append(self.in_stack.pop())\n return self.out_stack.pop()\n\n def peek(self):\n if not self.out_stack:\n while self.in_stack:\n self.out_stack.append(self.in_stack.pop())\n return self.out_stack[-1]\n\n def is_empty(self):\n return not self.in_stack and not self.out_stack\n```\n\nAssistant 2's code structure and readability were good, but the code logic had some issues. The improved version above fixes these issues and provides a working implementation of a queue using two stacks.", "score": [6.0, 8.0]}
+{"review_id": "8hNsNrHWE3dJVP3Z8LkSBW", "question_id": 67, "answer1_id": "MQJFbH3KoFaSMmPgmK9LPa", "answer2_id": "a4NeYaavwpgqC46erF3VFP", "reviewer_id": 2, "metadata": {}, "text": "6 7\n\nAssistant 1:\nStrengths:\n- The answer provides a general idea of how to solve the problem using nested loops.\n\nWeaknesses:\n- The answer does not provide any code implementation.\n- The \"found\" flag concept is not well-explained and might not be necessary for this problem.\n\nSuggestions for improvement:\n- Provide a code implementation to better illustrate the solution.\n- Instead of using a \"found\" flag, consider printing the common elements directly when they are found.\n\nAssistant 2:\nStrengths:\n- The answer provides a code implementation in C++.\n- The code is structured and includes comments to explain the logic.\n\nWeaknesses:\n- The code is unnecessarily complex and does not follow the problem statement's requirement of not using extra data structures.\n- The code initializes two empty arrays (common and second) which are not needed.\n- The code uses the min() function from the cmath library, which is not necessary for this problem.\n\nSuggestions for improvement:\n- Simplify the code by removing the unnecessary arrays and using nested loops to directly compare the elements of the two input arrays.\n- Remove the use of the min() function and instead use the actual sizes of the input arrays in the loop conditions.\n- Ensure the code adheres to the problem statement's requirement of not using extra data structures.", "score": [6.0, 7.0]}
+{"review_id": "B36Zdm2uDBdL3snLuEmHGM", "question_id": 68, "answer1_id": "dBWqryuzG6BHRdPgrWWpbH", "answer2_id": "bpcHYkjjXamp998ARdtfWK", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe value of f(2) is 1.\n\nAssistant 1's answer is incorrect.\n\nAssistant 2:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(4) - 2 + 3\nf(2) = 20 - 2 + 3\nf(2) = 19\n\nAssistant 2's answer is also incorrect. The correct calculation should be:\n\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nScores:\nAssistant 1: 1 (Incorrect answer and no problem-solving steps)\nAssistant 2: 6 (Incorrect answer, but the problem-solving steps were present, just with a small calculation mistake)\n\nIn this case, Assistant 2 performed better as they provided problem-solving steps, but both assistants provided incorrect answers.", "score": [1, 6]}
+{"review_id": "3Yt7eP3RQAAifZf8dk7W35", "question_id": 69, "answer1_id": "W74byrCbE7XiJWjcPTZS7x", "answer2_id": "SququGaG2Y8euqGVwV2Ueb", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n3x + 10 = 5x - 10\n-2x = -20\nx = 10\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nx = 6.\nAssistant 1's answer is incorrect.\n\nAssistant 2:\n3x + 10 = 5(x - 2)\n3x + 10 = 5x - 10\n-2x = -20\nx = 10\nAssistant 2's answer is correct.\n\nBased on the correctness of their answers and problem-solving steps, I would give Assistant 1 a score of 1 and Assistant 2 a score of 10. Assistant 2 performed better because they provided the correct answer and showed the correct problem-solving steps. Assistant 1 only provided an incorrect answer without any explanation or steps.\n\nOutput: (1, 10)", "score": [1, 10]}
+{"review_id": "BzResN4bYKbRi2gCiwJXdw", "question_id": 70, "answer1_id": "7WC5quGKdv8hADCxWaWZ57", "answer2_id": "9Zb7Fenxop3RQFUrxWr5Hn", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently:\n\nThe endpoints of the line segment are (2, -2) and (10, 4). We can use the distance formula to find the length of the segment:\n\ndistance = \u221a((x2 - x1)^2 + (y2 - y1)^2)\n\nPlugging in the coordinates:\n\ndistance = \u221a((10 - 2)^2 + (4 - (-2))^2)\ndistance = \u221a((8)^2 + (6)^2)\ndistance = \u221a(64 + 36)\ndistance = \u221a100\ndistance = 10\n\nNow, let's evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe answer provided by Assistant 1 is incorrect. The length of the line segment is not 8 units. There is no explanation or problem-solving process provided.\n\nAssistant 2:\nAssistant 2's answer is also incorrect. The calculations are wrong, and the Pythagorean theorem is not applied correctly. The correct calculation should be:\n\nhypotenuse^2 = 8^2 + 6^2\nhypotenuse = \u221a(8^2 + 6^2)\nhypotenuse \u2248 \u221a(64 + 36)\nhypotenuse \u2248 \u221a100\nhypotenuse \u2248 10\n\nThus, the length of the line segment is 10.\n\nScores:\nAssistant 1: 2 (Incorrect answer, no explanation)\nAssistant 2: 4 (Incorrect answer, but attempted problem-solving process)\n\nThe better-performing assistant is Assistant 2, as they attempted to solve the problem using the Pythagorean theorem, but their calculations were incorrect. Assistant 1 provided an incorrect answer without any explanation or problem-solving process.\n\nOutput: (2, 4)", "score": [2, 4]}
+{"review_id": "YuZavxr7fR5u3ZSDyEgem9", "question_id": 71, "answer1_id": "AZER7D3RKZ9F9SXHiMjdqM", "answer2_id": "HkYsYvcMNHvt5suAZJpFWw", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request. Assistant 1 provided a concise and clear template for a joint venture proposal email, while Assistant 2 provided a more detailed and comprehensive example. Assistant 2's response included more context, benefits, and proposed next steps, which made it slightly more informative and useful than Assistant 1's response. However, both responses were of high quality and would be helpful for the user.", "score": [9.0, 10.0]}
+{"review_id": "aghG5RfvDTmQoCvDyEnnVn", "question_id": 72, "answer1_id": "MSrdDafr77UvSHCnsPMSP3", "answer2_id": "F45reLGER4jrAjZZi5r58x", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth assistants provided helpful and relevant resignation letter templates. Assistant 1 provided a concise and straightforward template, while Assistant 2 provided a more detailed and comprehensive letter. Assistant 2's response included the sender's and employer's addresses, a more personalized message, and an offer to help with the transition process, which makes it more complete and professional. Therefore, Assistant 2 receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "kPAqDmUdvaXPyAWX3wZktB", "question_id": 73, "answer1_id": "hxkjUkDkXhGP78Vo74B4WE", "answer2_id": "ZLx4JuCtwNcv55nPbyXQvB", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a helpful and relevant response, but it was not as detailed as Assistant 2's response. Assistant 1 gave a general outline of how to structure a letter of recommendation, which is useful but not as precise as the user might have hoped. On the other hand, Assistant 2 provided a complete and well-structured example of a formal letter of recommendation, including specific details and sections that the user can easily follow and adapt for their own purposes. This makes Assistant 2's response more helpful, accurate, and detailed, deserving a higher score.", "score": [8.0, 10.0]}
+{"review_id": "abEp6Vbc5ShgxLuwqmXEVd", "question_id": 74, "answer1_id": "hv9jpG9PxeJ9AnFdNzH3Jv", "answer2_id": "XPgHiaRwa5ykfKMZG7GQ6b", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. However, Assistant 2's response was slightly more detailed and comprehensive. Assistant 2 included a list of key features and benefits, which added value to the email and made it more compelling. Additionally, Assistant 2 mentioned special introductory pricing, which could entice customers to try the new software solution. While Assistant 1's response was also well-written and informative, it lacked the specific details and promotional aspect that made Assistant 2's response stand out.", "score": [8.0, 9.0]}
+{"review_id": "T38Hsx5n9dmbAqgF6wZjNm", "question_id": 75, "answer1_id": "X5BVHT5pFpWxB88SzERrEG", "answer2_id": "MEWrWkY8Y5CSBTCNkfo2nx", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief and concise apology, but it lacked the personal touch and details that Assistant 2 included. Assistant 2's response was more comprehensive, addressing the customer's frustration, providing reassurance, and offering assistance for any further concerns. This makes Assistant 2's response more helpful, relevant, and detailed, earning it a higher score.", "score": [8.0, 10.0]}
+{"review_id": "986nVDpQjDpVm23ph32FrQ", "question_id": 76, "answer1_id": "Af2SufKwvDBriNXnZX9UoP", "answer2_id": "f8Jmezvy4SwqgSVeWQqTzk", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief overview of the topic, touching on the roots of jazz, its impact on popular culture, and its legacy. However, it lacked the details and structure of a script for a YouTube video. Assistant 2, on the other hand, provided a well-structured and detailed script that covered the history, cultural significance, and impact of jazz music. It also included visual cues and suggestions for footage, making it more suitable for a YouTube video format.", "score": [8.0, 10.0]}
+{"review_id": "RMyHPmqupL8HtPXbX6cGDx", "question_id": 77, "answer1_id": "HVnqVdSGRwQfqSsiYxtTTn", "answer2_id": "bZVCu7vDL4GGm5P8zN3xou", "reviewer_id": 1, "metadata": {}, "text": "6 9\nAssistant 1 provided a brief overview of their travel blog post, but did not actually include any content or details about the experiences or attractions. Assistant 2, on the other hand, composed an engaging and detailed travel blog post about a recent trip to Hawaii, highlighting cultural experiences such as attending a Hawaiian Luau and visiting the Bishop Museum, as well as must-see attractions like the Napali Coast and the Road to Hana. The response from Assistant 2 was more helpful, relevant, accurate, and detailed, which is why it received a higher score.", "score": [6.0, 9.0]}
+{"review_id": "PikbspXF7Luwe2Fevb3mSh", "question_id": 78, "answer1_id": "8hcmmjzyMyhFJ8ZwqJJfvw", "answer2_id": "gYkH3Pit5J9gnJyXJtoHUG", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided captivating movie reviews for recently released science fiction films. Assistant 1's review of \"The Future is Blue\" was engaging and touched on the plot, characters, and special effects. However, Assistant 2's review of \"Beyond the Stars\" was slightly more detailed and provided a more comprehensive overview of the film's elements, including the plot, special effects, and character development. Additionally, Assistant 2's review had a more immersive and captivating tone, making it more appealing to the reader.", "score": [8.0, 9.0]}
+{"review_id": "BrRDxvLVc6irWyLUkzofxQ", "question_id": 79, "answer1_id": "CU6gGfJqQmwZNxEG2n2Uda", "answer2_id": "dX4Pt6xQpstrrnZgRLSVnN", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful responses to the question. Assistant 1 gave a general overview of the podcast structure, touching on the impact of streaming services on music consumption, musicians' income, and music discovery. Assistant 2, however, provided a more detailed and structured response, breaking the podcast down into an intro, three segments, and an outro. This made Assistant 2's response more precise and easier to follow, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "NGypZbHD34h87Bgx5MbRYj", "question_id": 80, "answer1_id": "5SWbCoLV6NRrq5RtWQK4jp", "answer2_id": "jseJYZmd8fBnzbx2EN9oq9", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a good response that captured the essence of a symphony concert review, but it lacked specific details about the pieces performed and the orchestra itself. The response was more of a general impression rather than a detailed review. On the other hand, Assistant 2 provided a more comprehensive and detailed review, mentioning specific pieces, composers, and the orchestra's performance of those pieces. Additionally, Assistant 2 discussed the conductor's insights and anecdotes, which added depth to the review. Overall, Assistant 2's response was more informative and engaging, earning a higher score.", "score": [8.0, 10.0]}
diff --git a/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_bard_vicuna-13b-20230322-new-hp-fp16.jsonl b/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_bard_vicuna-13b-20230322-new-hp-fp16.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b354370031c26dda21f0ba12d2e0e3d5fdcacc92
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_bard_vicuna-13b-20230322-new-hp-fp16.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "F5j7QTB3NnT3fFUKFYrqWL", "question_id": 1, "answer1_id": "3oW4JY265ZPJGTYi2CgRYF", "answer2_id": "FNAYdXQwcrNkTD4nCqdFGb", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information on improving time management skills. Assistant 1's response was slightly more detailed, including additional tips such as automating tasks and getting organized. Assistant 2's response was also helpful and provided a clear list of tips, but it lacked the extra details that made Assistant 1's response more comprehensive. Overall, both assistants performed well, but Assistant 1's response was slightly more detailed and helpful.", "score": [9.0, 8.5]}
+{"review_id": "DpUkg3soYPvKSoaL9dt6wk", "question_id": 2, "answer1_id": "CtDuDqypbL958TQork7v54", "answer2_id": "7SzYV3TjzyrVkSKKGaXN4c", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about dealing with stress. They both covered similar key points, such as exercise, relaxation techniques, social support, sleep, and a healthy diet. Both answers also emphasized the importance of finding what works best for the individual and making stress management a regular part of one's routine. The level of detail in both responses is sufficient to provide a clear understanding of the various ways to manage stress. It is difficult to differentiate the overall performance of the two assistants, as they both provided comprehensive and valuable information. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "8Zdv6Shd9Y3gVz6ozXSsdX", "question_id": 3, "answer1_id": "jqb5c6eBeyVTFo7SsqxqHA", "answer2_id": "cA7LoXTBCgegnTcKWpjxQq", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more accurate and detailed response, correctly identifying the differences between Python and JavaScript, such as typing, standard libraries, and their primary applications. Assistant 2 made some incorrect statements, such as claiming that Python is a procedural language and JavaScript is a compiled language. Assistant 1's response was more helpful and relevant, while Assistant 2's response contained some inaccuracies and less detail.", "score": [9.0, 7.0]}
+{"review_id": "iH9xsm7x6acxBLhVtivnVZ", "question_id": 4, "answer1_id": "P5rC8W6gTuD4aY6HR5HQj9", "answer2_id": "8Yo5UVMPzoxjCsi3fuHNmH", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question about increasing productivity while working from home. Both assistants offered similar tips, such as setting up a dedicated workspace, establishing a schedule, eliminating distractions, taking breaks, and communicating with the team. They also emphasized the importance of self-care, including exercise, a healthy diet, and mental health. Both responses were well-organized and easy to understand, making it difficult to differentiate between the two in terms of quality. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "eBKti8rpMKJL5bbsjniumM", "question_id": 5, "answer1_id": "3uaqwfbwxtyDdqB8UVN3jM", "answer2_id": "cCc7EgnQNgcVAVqk96Pyto", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate explanations of the basics of quantum computing. Assistant 1's response was slightly more detailed, covering quantum gates and quantum algorithms, while Assistant 2's response was more concise. Both responses mentioned superposition, entanglement, qubits, and the potential applications of quantum computing. Assistant 1's response was more structured and provided a clearer breakdown of the key concepts, which is why it receives a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "AbvyyPz6nz5vXAYeum7RVM", "question_id": 6, "answer1_id": "HTRJ5q97DrgzoSofxkgedz", "answer2_id": "TUL5M2TWXQmM56U4Ckk4s4", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in their responses. Assistant 1 provided a more detailed response, discussing the nutritional differences, benefits of each type of protein, and considerations for choosing between plant-based and animal-based protein sources. Assistant 2 also provided a good response, but it was slightly less detailed and focused more on the broader differences between the two types of protein sources. Both responses were informative and useful, but Assistant 1's response was more comprehensive and detailed, which is why it received a higher score.", "score": [9.0, 8.0]}
+{"review_id": "2zMP5KGEcnWxhSy8MowEXF", "question_id": 7, "answer1_id": "EhhyKNc3m8c9tnxm8uPfmt", "answer2_id": "iZUdysQf69MHQE9Lq76mii", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about developing critical thinking skills. They both offered a list of practical tips and emphasized the importance of practice and self-reflection. Assistant 1's answer was slightly more concise, while Assistant 2's answer provided more elaboration on some of the tips. However, both responses were of high quality and provided a sufficient level of detail to help the user understand how to develop their critical thinking skills.", "score": [9.0, 9.0]}
+{"review_id": "WU5R7ZDYCiHjxeX4w2bbZ4", "question_id": 8, "answer1_id": "JQXPknRQd24NUPKZxi6RNf", "answer2_id": "D8EWgfq9yytMgQYybq3Rhq", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 provided a clear list of challenges faced by the education sector, with brief explanations for each point. However, Assistant 2 provided a more comprehensive list of challenges, including additional points such as access to education, inclusion of students with disabilities, employability, and globalization. Assistant 2 also used a numbered list format, which made it easier to read and understand. Overall, both assistants performed well, but Assistant 2 offered a more detailed and organized response.", "score": [8.0, 9.0]}
+{"review_id": "PFDpEPHw9wJccxJ9U2Y7E7", "question_id": 9, "answer1_id": "Lb3C2xQKdLCqFj4v3rmaof", "answer2_id": "5bzosDZ96t2p5qxeS7VAeA", "reviewer_id": 1, "metadata": {}, "text": "9 6.5\nAssistant 1 provided a more comprehensive and well-organized answer, covering personal, psychological, social, cultural, and economic factors that influence consumer behavior. The answer also included examples for each factor, making it easier to understand. Assistant 2's answer was less detailed and missed the cultural factors. Additionally, Assistant 2's response ended abruptly, making it seem incomplete.", "score": [9.0, 6.5]}
+{"review_id": "nDkiK6rYXKGZAqFV555ToE", "question_id": 10, "answer1_id": "DhuZJtL3jhnrsTBvDct9oV", "answer2_id": "itBobRwhSZUnsWkUs5bVwX", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective strategies for conflict resolution in the workplace. Both answers included similar strategies, such as active listening, open communication, problem-solving, and mediation. Both assistants also provided a sufficient level of detail in their responses. The only minor difference is that Assistant 2 mentioned conflict resolution training, establishing ground rules, and documenting everything, which are also valuable strategies. However, both answers are comprehensive and informative, and therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "fkW29wJsEAiPCEq5sYtqUv", "question_id": 11, "answer1_id": "mDSj4BKim2eANUnEjW7xBm", "answer2_id": "iVTKdmUeTySryqEHhtgmkS", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the implications of using single-use plastic bottles versus reusable bottles. Assistant 1's response was more concise and focused on the main environmental and health impacts, while Assistant 2 provided additional details on the production process, water quality, and cost implications. Both responses were informative, but Assistant 1's response was slightly more precise and well-organized, which is why it receives a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "jhCtUtTMTCoEBc5UW3RH8a", "question_id": 12, "answer1_id": "MnkceSK7WwyXqAhbuKVYX7", "answer2_id": "7UgtosTTGx7bnRcQGxdDJo", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both covered important factors to consider when designing an inclusive and accessible public transportation system. Assistant 1 focused on accessibility, affordability, convenience, safety, and sustainability, while Assistant 2 discussed universal accessibility, diversity of needs, multi-modal options, frequency and reliability, safety, information and communication, user-friendly fare collection, integration with other modes, community engagement, and inclusive pricing. Both answers provided a good level of detail, and it is difficult to determine which one is better, as they both covered different aspects of the topic. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "L33DhQWsPUKKeeQJjPjJvD", "question_id": 13, "answer1_id": "EsyaBVpTN8BGbTSiFMnZUF", "answer2_id": "Yt5tqBrrKfcumuuuxf2qqM", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was more precise and well-structured, with a clear distinction between fiscal and monetary policies and their respective roles in combating economic recessions. Assistant 1 also touched upon the debate between the use of fiscal and monetary policies, which added depth to the answer. Assistant 2's response was also informative, but it lacked the same level of clarity and structure as Assistant 1's response. Additionally, Assistant 2's answer repeated some points, which made it slightly less concise. Overall, both responses were helpful, but Assistant 1's answer was more precise and well-organized.", "score": [9.0, 8.0]}
+{"review_id": "WsWV9h75BoGVH3JUNggnTb", "question_id": 14, "answer1_id": "dX8M752A6tzqLg9KhwgG5p", "answer2_id": "4pZ4Uy544Bc3K59fhbW7xj", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed, providing specific examples of how language and cultural barriers can affect communication and relationships, as well as offering suggestions for overcoming these barriers. Assistant 2's response was also informative, but it did not provide as many specific examples or suggestions for overcoming barriers. Both answers were well-structured and easy to understand, but Assistant 1's response was more comprehensive, which is why it received a higher score.", "score": [9.0, 8.0]}
+{"review_id": "YQGFhzrEateyDRwrnCuo8b", "question_id": 15, "answer1_id": "dzwhq5XbaEBVpargyczz9B", "answer2_id": "762peC97upw58WFQeLNoXZ", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 provided a slightly more detailed response, covering four specific areas where AI can be used to improve healthcare delivery: diagnosing diseases, treating diseases, monitoring patients, and providing administrative support. Assistant 2 also provided a good response, discussing AI's potential in assisting with diagnosis and treatment planning, predictive analytics, streamlining administrative tasks, and personalizing patient care. However, Assistant 1's response was more structured and covered a broader range of applications, which is why it receives a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "EXaoCgDZmCfncAcMKmoarE", "question_id": 16, "answer1_id": "8zqxUtHxgtoHBkbf2bkqNW", "answer2_id": "Yqfg2saKSNPauCS8YdsjdD", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided helpful, relevant, and accurate information about the CRISPR-Cas9 gene editing technology, its potential applications, and ethical implications. Assistant 2, however, provided a slightly more detailed explanation of the process, including the introduction of the CRISPR-Cas9 system into the cell and the role of the repair template. This additional information gives Assistant 2 a slight edge in terms of the level of detail provided, resulting in a higher score.", "score": [8.0, 9.0]}
+{"review_id": "CsJEfSeUE7vZKgecBHRpxZ", "question_id": 17, "answer1_id": "WJc37t4n5PqmKKS3V4eMG2", "answer2_id": "gKd2BPWp7HG9PTDdgS7HT8", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both explained how vaccinations work to protect individuals and communities from infectious diseases and provided a clear explanation of herd immunity. Both responses covered the importance of herd immunity in protecting those who cannot be vaccinated or have a weaker immune response. The quality of the answers is very similar, and both assistants deserve a high score of 9.", "score": [9.0, 9.0]}
+{"review_id": "2Uwz48bkBCuLtJH8wqHqXC", "question_id": 18, "answer1_id": "CvVLf8FgoHywJy8j8JJ4qL", "answer2_id": "CyZSkdkGWmjh8ZgxrQ2f66", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in their responses. Assistant 1 provided a more detailed explanation of how social media platforms use algorithms to determine what news stories are shown to users, which can contribute to the spread of misinformation. However, Assistant 2's response was more concise and focused on the broader implications of social media's influence on news consumption and the spread of misinformation. Both assistants mentioned measures to combat misinformation, but Assistant 2's response was slightly more comprehensive in discussing the ongoing debate about the effectiveness of these measures. Overall, both responses were informative, but Assistant 2's response was slightly more well-rounded and concise.", "score": [8.0, 9.0]}
+{"review_id": "BMrbx2HZz2SHFN4Q7yifRs", "question_id": 19, "answer1_id": "P5rytR6vTJjxgWxRoxT3vX", "answer2_id": "LwA42YWwqvdkStLZEUbmm9", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in response to the question. Assistant 1 gave a clear explanation of how cultural, social, and economic factors influence food choices and provided examples for each factor. They also suggested ways to promote healthier diets, such as making healthy foods more accessible and affordable, and educating people about the importance of eating a healthy diet. Assistant 2, however, provided a more comprehensive response by discussing the importance of understanding these factors and how they can be leveraged to encourage healthier food choices. They also mentioned the need for a holistic approach that considers the broader social and economic context. Assistant 2's response was slightly more detailed and well-rounded, which is why they received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "2H8xHUPdT4rzKGtLmvnLqD", "question_id": 20, "answer1_id": "5biCd7QRZP6rquaz8eC9Vm", "answer2_id": "ajfFPpHDdMiSp3PVfsUFbE", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question about natural selection and its contribution to the evolution and adaptation of species. Both assistants explained the principles of natural selection, including variation, reproduction, survival, and heredity. They also provided examples to illustrate how natural selection can lead to the evolution of new species and the adaptation of species to their environment. The quality of the answers is very similar, and both assistants deserve a high score of 9.", "score": [9.0, 9.0]}
+{"review_id": "Yef5sw6yehkq5n5PYByuZK", "question_id": 21, "answer1_id": "363RwB6kr8nV6qFNdjXZnS", "answer2_id": "3n8npKQKxgHEbEmf2K6AwF", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided relevant and helpful responses to the question. Assistant 1 gave a concise and straightforward introduction, focusing on the knight's service and respecting the royal presence. Assistant 2, on the other hand, provided a more detailed and eloquent introduction, emphasizing the knight's background, values, and commitment to the kingdom. Assistant 2's response was more immersive and engaging, which is why it received a slightly higher score. However, both responses were accurate and appropriate for the context of a medieval knight's introduction at a royal banquet.", "score": [8.0, 9.0]}
+{"review_id": "ShGfXWpfadMdvPjLwjmD5o", "question_id": 22, "answer1_id": "gDnYxMu5Dd52xhMqQAJaZP", "answer2_id": "LfeqCy9vR3kCaJiQV4Fyqf", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant responses to the question. Assistant 1 focused on the crew's current state, appealing to their sense of adventure and excitement, and offering them a share of the treasure. Assistant 2, on the other hand, emphasized the rewards, power, and envy that would come with finding the treasure, as well as the dangers and challenges they would face. Assistant 2 also painted a more vivid picture of the treasure and the emotions the crew would experience. While both responses were good, Assistant 2's response was slightly more engaging and motivational, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "DX9t4EQdRZuukZ97GmLDtQ", "question_id": 23, "answer1_id": "kCV5RSrnmmTyv3HA5oU38P", "answer2_id": "JrnFfmnsuykbTkFbUnei6k", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the user's question. Assistant 1 gave a brief introduction on how they would declare their love and provided an example of a soliloquy. Assistant 2, while mentioning that they are an AI language model and do not experience emotions, still provided a Shakespearean-style soliloquy that was more in line with the user's request. Assistant 2's response was more creative and closer to the Shakespearean language, which is why it receives a slightly higher score.", "score": [9.0, 10.0]}
+{"review_id": "EuVtDo759b9soEkH3JhTf2", "question_id": 24, "answer1_id": "CTGLYNAWrWeuCYZAR9bD2r", "answer2_id": "BDBSC5P2DE3E5LuB9FX7tn", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both explained their origin stories in a way that a curious child would understand. Assistant 1 focused more on personal experiences and inspiration from other superheroes, while Assistant 2 emphasized the discovery and development of their powers. Assistant 2's answer was slightly more detailed and engaging, which is why it received a higher score. Both answers, however, were well-crafted and appropriate for the target audience.", "score": [8.0, 9.0]}
+{"review_id": "YX2PyWLwGNdjFRYDvhd5mV", "question_id": 25, "answer1_id": "Eh5qCENdftYf9JKrBspYna", "answer2_id": "TS97KLnyki7XWYFdSdbZiY", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in response to the user's question. Assistant 1 provided a detailed list of six technological advancements, while Assistant 2 provided a list of nine advancements. Both assistants covered some common topics, such as AI, virtual reality, and space travel. However, Assistant 2's response was more comprehensive, covering additional topics like transportation, medicine, and climate change. This made Assistant 2's response slightly more informative and detailed, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "oHNtMRsinSzXESViTh2Rzu", "question_id": 26, "answer1_id": "M6hCe8Z4JDpt75HZ5PsSWr", "answer2_id": "9ATk4tAxBoe267rQUDrgYD", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate descriptions of a winning play in the final seconds of a championship game. Assistant 1 gave a more detailed and precise description of the play, including the specific yard line and the quarterback's actions. However, Assistant 2 provided a more engaging and emotional description, capturing the excitement and energy of a sports commentator during a championship game. While both responses were helpful, Assistant 2's response was more in line with the role of a sports commentator, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "DoHfYEtf4HM7RkyEqqbzcb", "question_id": 27, "answer1_id": "QCDsmbALfut5758pyiKvc5", "answer2_id": "C4zL3pUsGj2Z8U7Tm2nzDN", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided highly detailed and relevant answers to the question. They both described their signature dishes with great precision, highlighting the ingredients, cooking techniques, and flavors. Both assistants also expressed their passion for their dishes and their confidence in impressing the judges. It is difficult to differentiate between the two responses in terms of quality, as both are equally well-crafted and informative. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "PVFvHB8aqbsQeZbyrN5wbh", "question_id": 28, "answer1_id": "NWUbhwZQCuXsuQimrjQRza", "answer2_id": "CTi8ZRuHoAzRQjMRAU3mBN", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the user's question. Assistant 1 did a great job of describing the emotions and the view from the top of Mount Everest, using vivid language and imagery to convey the experience. However, Assistant 2's response was slightly more helpful and accurate, as it acknowledged the AI's inability to experience emotions and provided a more objective description of the emotions and the view from the summit. Additionally, Assistant 2 mentioned other nearby peaks, which added to the level of detail in the response.", "score": [8.0, 9.0]}
+{"review_id": "ZADELxBKx4Jr7fzd4reV8D", "question_id": 29, "answer1_id": "VYwSjZrSLW9ZSvqryyjEaB", "answer2_id": "8chhW8gPzM6zHUN6VAqpym", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 gave a more personal account of daily life on Mars, detailing the daily routine and challenges faced. Assistant 2, on the other hand, provided a more general overview of life on Mars, focusing on the challenges and the need for resourcefulness and teamwork. Both responses included similar challenges, such as the harsh environment, radiation exposure, and isolation. However, Assistant 2's response was slightly more detailed and comprehensive, which is why it receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "jKb9Z2tEtFQ7GvHrx2Y9Nj", "question_id": 30, "answer1_id": "FA7PXuUbEVGKHaWpxaimy8", "answer2_id": "Pxj7ZVsvQ9HMQnRVMh7os4", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the user's question. Assistant 1 focused on the character's self-sufficiency, garden, and chickens, while also mentioning two allies, Sarah and John. Assistant 2, on the other hand, provided a more immersive response by adopting the first-person perspective and describing the character's life in a fallout shelter. Assistant 2 also mentioned two allies, Maria and Jack, and touched upon the challenges of trust and forming a makeshift society. Both responses were helpful and accurate, but Assistant 2's response was slightly more engaging and detailed, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "gzyfNwK9T9ad5GcFSrwH4B", "question_id": 31, "answer1_id": "j5EV5cZNsn9DcF6WsvXRzS", "answer2_id": "NCUhSMqzESRJJVDKHQ7XPB", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in response to the user's question. Assistant 1 gave a slightly more detailed answer, listing more ways to determine if a restaurant is popular among locals or tourists, and providing a clear explanation of why this information might be useful. Assistant 2 also provided useful indicators and reasons for why this information is helpful, but the response was slightly less detailed and organized compared to Assistant 1. Overall, both assistants performed well, but Assistant 1 had a slight edge in terms of detail and clarity.", "score": [9.0, 8.5]}
+{"review_id": "9jNHLajJfRcr7NVmkZLiuP", "question_id": 32, "answer1_id": "2eAYCYmwTkPa3ejQDv8LyB", "answer2_id": "mEvCEzVGnENJAMzs2ioTmp", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 gave a clear and concise list of behaviors that could indicate someone is pretending to understand a topic, while Assistant 2 provided a more detailed and expanded list of clues. Assistant 2's answer also included a note about the possibility that these behaviors might not always indicate pretense, which adds nuance to the response. Overall, both assistants performed well, but Assistant 2's answer was slightly more comprehensive and nuanced, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "cnMzbR7MUCcScvn2AU5TbX", "question_id": 33, "answer1_id": "d562WYnhsvgJ8J6Ubitmvw", "answer2_id": "CoFnjLgW5Eens9JhnpiJHZ", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1's response was slightly more detailed, providing specific situations where using a paper map or asking for directions may be the best option, such as hiking, driving in remote areas, and traveling to a country with a different language. Assistant 2's response was also informative, but it did not provide as many specific examples. Both assistants covered the main reasons why someone might choose a paper map or ask for directions over a GPS device or smartphone app, but Assistant 1's response was more comprehensive.", "score": [9.0, 8.5]}
+{"review_id": "XoJgVHPKHhAJsWjedmvu6s", "question_id": 34, "answer1_id": "hPMvV6zL2C4qTP4mRmhJwG", "answer2_id": "aQ9mM2wfmHSge2KvuQ8hmx", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed, with a clear structure and specific examples of what to look for in body language, questions, and responses. Assistant 2 also provided a good answer, but it was slightly less detailed and organized. Both assistants covered similar points, but Assistant 1's response was more concise and easier to follow, which is why it received a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "QTADJkQ5cQFKcqHxw3cboc", "question_id": 35, "answer1_id": "npWNeKceGyqCYaRpY4w54g", "answer2_id": "eM5S86H3bXTGLQcVW59XsD", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both covered the main reasons why someone might prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher. The level of detail in both responses is sufficient and well-organized. Assistant 1 focused on four main points, while Assistant 2 expanded on those points and added two more reasons. Both answers are of high quality and deserve equal scores.", "score": [9.0, 9.0]}
+{"review_id": "jU7ePk5CXSkKaJQ8LNf3Nt", "question_id": 36, "answer1_id": "WVuaK9m8Sedcws27tNu7Ev", "answer2_id": "MpBrYa9J2zQy9NGi2dvKp8", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. They both offered a list of tips on how to assess the credibility of a source without relying on the reputation of the author or publisher. Assistant 1 focused on checking credentials, looking for bias, checking sources, looking for errors, and reading the source critically. Assistant 2 provided similar tips but also included checking the date, evaluating the language, assessing the source's bias, checking the URL, and looking for peer review. Both responses were detailed and informative, and they complement each other well. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "5zvcBsYefjtBTuvu8dBEcw", "question_id": 37, "answer1_id": "HLtTf83Y5QRP4TxX6nw5TC", "answer2_id": "i8QZzVJo2WkTLc7WMv4bNm", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed, discussing the release of endorphins, adrenaline, and the sense of control over fear as reasons why some people enjoy being scared. Assistant 2 also mentioned adrenaline and coping with fears, but did not mention endorphins. Both assistants acknowledged that personal preference plays a role in whether someone enjoys being scared or not, and Assistant 2 mentioned the possible influence of genetics, personality traits, and life experiences. Overall, both responses were informative and well-rounded, but Assistant 1 provided a slightly more detailed answer.", "score": [9.0, 8.5]}
+{"review_id": "CbYDQX7R9ezj5kkVRPYebt", "question_id": 38, "answer1_id": "Fmdtexq6QQNuoqZkZfDURY", "answer2_id": "HP2CYZ3HJWMcGp6QF9qbq6", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on identifying patterns of behavior, reactions to violations of cultural norms, and talking to people about their culture. Assistant 2 expanded on the answer by discussing nonverbal communication, social distance, greetings, interaction patterns, and values and beliefs. Assistant 2's response was slightly more detailed and covered a broader range of aspects related to cultural norms and expectations, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "38xqQVJnhXcNeEWnJbRjS3", "question_id": 39, "answer1_id": "WxnC69jTMkyJvcqvMCgCwY", "answer2_id": "XrMYXEWHff6jXjjJWquXfW", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nAssistant 1 provided a more detailed response, listing potential benefits and costs of space exploration, as well as mentioning the ethical implications. The response was well-structured and covered various aspects of the question. Assistant 2 also provided a relevant response, but it was less detailed and didn't cover as many aspects as Assistant 1. Both assistants acknowledged that the decision is a matter of personal opinion and priorities, but Assistant 1's response was more comprehensive and informative.", "score": [8.0, 7.5]}
+{"review_id": "NGKHZmtHH4fjrhR9uH3mTi", "question_id": 40, "answer1_id": "npZdTFPRqZfoqzt5YurYEL", "answer2_id": "eeS6qNLeX6AScyGrS5pwrc", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 emphasized the importance of prioritizing both job creation and technological progress, providing suggestions on how to mitigate the negative effects of automation on employment. Assistant 2, on the other hand, focused on striking a balance between job creation and technological progress, discussing the importance of both aspects and suggesting policies to achieve this balance. Assistant 2's response was slightly more detailed and structured, which is why it received a higher score. Both assistants, however, provided valuable insights into the topic.", "score": [8.0, 9.0]}
+{"review_id": "3L3VecPaViNhLwkop4tc96", "question_id": 41, "answer1_id": "iy9aa5sqFeNA2uPQZLpxEz", "answer2_id": "an76qPuSJ4TQDWvNXdcJ2w", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a detailed breakdown of the number of blinks per minute, hour, day, and year, and even estimated the total number of blinks in a lifetime. However, the answer did not take into account the variability of factors such as age, health, and lifestyle. Assistant 2, on the other hand, acknowledged the difficulty in determining an exact number of blinks per lifetime due to these factors and provided a more realistic range of blinks per minute. Assistant 2 also mentioned the influence of screen time on blinking frequency, which is a relevant factor in today's world. Both assistants explained the purpose of blinking, but Assistant 2's response was more comprehensive and took into account the variability of factors affecting blinking frequency.", "score": [7.0, 8.0]}
+{"review_id": "HqGwzhX9hBR6UEF3P38qBV", "question_id": 42, "answer1_id": "XAALo4GKWE3QNb7wbkWVNk", "answer2_id": "DYnmt5xTTTkLuyyBxhmDsq", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a step-by-step explanation but made a mistake in calculating the number of atoms in a grain of salt. The mass of one grain of salt was not calculated, and the explanation was not clear. Assistant 2 provided a more detailed and accurate response, considering the size and mass of a grain of salt and calculating the number of atoms accordingly. Both assistants provided relevant information, but Assistant 2's response was more helpful and precise.", "score": [7.0, 8.0]}
+{"review_id": "Xuze3Gcb5D4NofeEUNCmBs", "question_id": 43, "answer1_id": "XRWjLnJNyGNMfktTvWpbRV", "answer2_id": "MbcVvnW3Eh9wZi34SzYawn", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a straightforward answer of 8.6 million lightning strikes per day, but the explanation was not as detailed as it could have been. The response did not explain how the 100 lightning strikes per second were calculated or how it relates to the daily number. The answer also focused on the dangers of lightning strikes, which was not directly related to the question.\n\nAssistant 2, on the other hand, provided a more detailed explanation of the factors influencing lightning strikes and the difficulty in estimating the exact number. The response included an estimate of 19.7 trillion lightning strikes per year, which could have been divided by 365 to give a daily estimate. However, the response did not provide a specific daily number, which was the main focus of the question. Despite this, the explanation was more thorough and took the reader through the reasoning process more effectively.", "score": [7.0, 8.0]}
+{"review_id": "GYvWRpUV7eXtBMYSZKvSCk", "question_id": 44, "answer1_id": "CHeyn9eR3u5eFq99UJ3pbB", "answer2_id": "4oSKRo3DtcNtE8gRbrSJPg", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a rough estimate of the number of balloons needed to lift a house and mentioned some factors to consider, but the explanation was not as detailed or step-by-step as requested. Assistant 2, on the other hand, provided a more detailed explanation, breaking down the reasoning into steps and using a formula to estimate the number of balloons needed. Assistant 2 also addressed the practicality of the scenario, making it a more helpful and precise response.", "score": [7.0, 9.0]}
+{"review_id": "TMyBk2SxGRZQvNv5JB7b3q", "question_id": 45, "answer1_id": "kfoNRLCHFucfWcxFBPoJgP", "answer2_id": "bhh9SFsYgquUPeC2qBKMJw", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in their responses. Assistant 1 gave a clear answer of 15,220,700 texts per minute and explained the reasons behind the high number of text messages sent. However, Assistant 2 provided a more detailed step-by-step explanation of the reasoning behind the estimate of 16 million texts per minute, taking into account factors such as time of day, day of the week, and geographic location. Assistant 2 also mentioned the knowledge cutoff date and acknowledged the limitations of the estimate. Therefore, Assistant 2 receives a slightly higher score due to the additional details and transparency in the response.", "score": [8.0, 9.0]}
+{"review_id": "moSHMirAmW5pzL8YeGrKqV", "question_id": 46, "answer1_id": "A4sDEGXpWaNVA39uRsDNjB", "answer2_id": "oKMFEsBBp4SEJiVPRQZN2N", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more detailed and structured response, mentioning a study that found the average person speaks about 182,500 words per week and breaking it down to an estimated 7,000 words per day. The response also discussed factors that can affect the number of words spoken per day, such as occupation, culture, and personality. Assistant 2, on the other hand, provided a rough estimate based on languages, speech per person, and world population but did not provide a specific number. Both responses acknowledged the difficulty in estimating the exact number of words spoken daily on Earth, but Assistant 1's response was more helpful and relevant due to the inclusion of a specific estimate and a more in-depth discussion of factors affecting the number of words spoken.", "score": [8.0, 7.0]}
+{"review_id": "6WRM4CXE3dXvQjTrH8HwMJ", "question_id": 47, "answer1_id": "hUAc6BCs5NuY7tS62PPXbT", "answer2_id": "XXXVQo5w5XCFiEuK6n3hUV", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided an estimated number of snowflakes that fall each year, which is 1 septillion, and explained the factors that affect the number of snowflakes, such as snowfall, temperature, and wind. The response also described how snowflakes are formed and their shapes. However, the answer did not specifically address the number of snowflakes during a typical winter, and the given estimate may not be accurate.\n\nAssistant 2 acknowledged the difficulty in providing a specific number of snowflakes that fall during a typical winter and explained that the number can vary greatly depending on factors such as snowfall rate, temperature, humidity, and wind speed. The response provided an estimate based on averages and assumptions, which is more relevant to the question. However, the answer could have been more precise and detailed in explaining the calculations and assumptions. Overall, Assistant 2's response was more relevant and helpful in addressing the question.", "score": [7.0, 8.0]}
+{"review_id": "D79zcWVSkZdTX3EPcgDPvw", "question_id": 48, "answer1_id": "TMtjmb5tDizQ9RETRxpt2s", "answer2_id": "Ysbx8mFxYva6TbHQGKL8K7", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and comprehensive answer, taking into account factors such as the number of books indexed by Google Books, the average length of a book, and the potential underestimation due to self-published books and lost or destroyed books. The answer also provided a range of estimated pages, which is more realistic given the uncertainty of the data. Assistant 2's answer, while still relevant and somewhat accurate, provided a less detailed estimate and did not consider as many factors as Assistant 1. Additionally, Assistant 2's estimate of 1 billion pages seems significantly lower than Assistant 1's estimate, which ranged from 100 trillion to 1 quadrillion pages.", "score": [8.0, 6.0]}
+{"review_id": "WYnf2uSh2YRZivcwZQvXZF", "question_id": 49, "answer1_id": "AwQMPhhaJ32ByA3VjKF5Ph", "answer2_id": "KenbYuKVVUVXe2stVtxkeF", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a clear explanation of the Earth's orbit and its importance for life on Earth. However, the initial statement about 4.543 billion orbits is incorrect, as it is based on the age of the Earth, not the beginning of life. Assistant 2 provided a more accurate estimate of 10 billion orbits, taking into account the age of life on Earth and the Earth-Sun orbit. Assistant 2 also provided more context on the factors that influenced the estimate, such as asteroid impacts and the stepping-stone model. Both assistants provided relevant information, but Assistant 2's response was more accurate and detailed in addressing the question.", "score": [7.0, 8.0]}
+{"review_id": "HdQbz7JqhrawunpUnLjtk3", "question_id": 50, "answer1_id": "UCqxXfMAnqJKbApsjrPFcq", "answer2_id": "ceWTK9jYWZq2Dd2H7KGkWY", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more precise and structured answer, mentioning specific numbers from the US Copyright Office, Spotify, and Apple Music, which helps to give a better understanding of the scale of recorded songs. Assistant 2, while providing a broader context and discussing the cultural impact of music, did not provide as many specific data points or estimates. Both answers acknowledged the difficulty in estimating the exact number of recorded songs, but Assistant 1's response was more helpful and detailed in terms of providing concrete numbers and sources.", "score": [8.0, 7.0]}
+{"review_id": "aiTPDe79hvCMoR3GjAHNRj", "question_id": 51, "answer1_id": "YMRg5Xi9BBvvqqny2tqJZ3", "answer2_id": "6Bbzs6YWyzPj52rZAfRPTt", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more relevant and detailed response, discussing the potential impact of the Internet on the Renaissance period in terms of intellectual and cultural growth, arts, and education. Assistant 2, on the other hand, focused more on speculating how the Internet could have been invented during that time, which was not the main focus of the question. While Assistant 2 did touch on some potential outcomes, the response was less focused on the actual impact of the Internet on the Renaissance period.", "score": [8.0, 6.0]}
+{"review_id": "oKVVytKEEXQSqXu9bWWnvL", "question_id": 52, "answer1_id": "fhr7iMPb9SVJ663mXKBuct", "answer2_id": "EG6h5jgpQ8wesKAX43nt9X", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information regarding the Aztecs and the Spanish conquistadors. Assistant 1 focused more on the potential consequences of the Aztecs repelling the Spanish, while Assistant 2 provided a brief historical context before diving into possible scenarios. Assistant 2's response was slightly more detailed and organized, which made it easier to understand the potential outcomes. Both responses acknowledged the speculative nature of alternate history, but Assistant 2's response was more comprehensive, thus earning a higher score.", "score": [8.0, 9.0]}
+{"review_id": "Fm9wKrZszpFLBkRDEjW3X3", "question_id": 53, "answer1_id": "4rrmyZw9zhyJGqdrcazFvt", "answer2_id": "J76ZgE27N2Pe77NcEBrWUV", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 focused on the immediate consequences of the Black Death, such as the decline in population, labor shortage, and societal changes. Assistant 2, on the other hand, provided a more detailed and structured response, discussing potential consequences in five different areas: demographic, economic, political, social, and medical. This made Assistant 2's response more comprehensive and informative, which is why it received a higher score. However, both responses were well-written and informative, providing valuable insights into the potential effects of the Black Death not occurring in the 14th century.", "score": [8.0, 9.0]}
+{"review_id": "hQ6UkPaCdHigZVjVWKyocG", "question_id": 54, "answer1_id": "Za3mY9xwxpZdPmGW48wtzu", "answer2_id": "hoFFABFFF7J8juv3AnoVWZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1 discussed the possibilities of Newton making important discoveries in medicine, studying the human body, and investigating diseases. However, Assistant 2 provided a more detailed response, offering specific examples of areas in biology where Newton might have made contributions, such as plant biology, animal behavior, evolution, and medicine. Assistant 2's answer was more comprehensive and informative, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "Vvt86C6qFWUNY5iauMpM8M", "question_id": 55, "answer1_id": "cbAaJS9ULjR4XYSHATujSG", "answer2_id": "8R5n2uZ4oq52QkWQsSS6No", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused more on the cultural impact of the Beatles and how their absence might have affected the world in terms of hope, optimism, and popular culture. Assistant 2, on the other hand, provided a more detailed response, listing several possible scenarios of how the music industry and popular culture might have been affected without the Beatles. While both answers were accurate and relevant, Assistant 2's response was more detailed and provided a broader range of potential outcomes, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "n64QzF9iV2XaNuiSj7eoxP", "question_id": 56, "answer1_id": "ZEgb9fvopGo7HF5wPeoeHs", "answer2_id": "ECtu3QVXVrNhssSyktKzkq", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused more on the impact of Turing's work on the Allied victory and the potential consequences if he had not cracked the Enigma code. Assistant 2, while also discussing the potential consequences, acknowledged that codebreaking was just one aspect of the Allies' efforts during World War II and mentioned other factors that played critical roles in the outcome of the war. This additional context provided by Assistant 2 gives it a slightly higher score, as it offers a more comprehensive understanding of the situation.", "score": [8.0, 9.0]}
+{"review_id": "7K3PSDB4P4yQztgPLgTWbS", "question_id": 57, "answer1_id": "igMXoEiszFM65ZS2KUTvtm", "answer2_id": "jb8ot3ucdF3RvzApEYKft6", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused more on the impact of the Suez Canal on Egypt's economy and national pride, while Assistant 2 discussed the consequences for international trade and the geopolitical landscape. Assistant 2's answer was slightly more comprehensive, as it covered a broader range of consequences and provided more context about the canal's significance in trade and military operations. Therefore, Assistant 2 receives a higher score of 9, while Assistant 1 receives a score of 8.", "score": [8.0, 9.0]}
+{"review_id": "KgmpYAW82aAVp4PKj54LVL", "question_id": 58, "answer1_id": "Up4h8RpgVVafBtUj4tiGPZ", "answer2_id": "cAjR8aDWRKVKbxKeqRRgvW", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 gave a general overview of the possible outcomes if the Maya civilization had not collapsed, mentioning their advanced technology and culture, and the potential for a global impact. However, Assistant 2 provided a more detailed response, outlining five specific scenarios that could have occurred if the Maya civilization had not collapsed. This level of detail and organization made Assistant 2's response slightly more informative and useful, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "9pDYitT76qoK5Y6KiNCQuh", "question_id": 59, "answer1_id": "CMg3ypKyykGUzQsDmX7i35", "answer2_id": "7hKzFRQWWPVbr8H4wcbGdA", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1 focused on the potential for other explorers to discover the Americas and the impact of Columbus's discovery on the world. Assistant 2, on the other hand, focused on the consequences of Columbus's arrival on the indigenous peoples and how their societies might have developed without European influence. Assistant 2's response was slightly more detailed and provided a clearer picture of the potential alternative history of the Americas, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "WtBeK3CgoEh7siB9DiFNj5", "question_id": 60, "answer1_id": "WhLiJXznzRBkzxDdnc5ndX", "answer2_id": "3vMtYzSrV5ARpYA9fGUe4H", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 gave a good overview of how Van Gogh's life might have been different if he had been successful during his lifetime, mentioning financial stability, travel, and the potential impact on his art. However, Assistant 2 provided a more detailed response, listing four possible outcomes and elaborating on how each might have influenced Van Gogh's life and art. While both answers were accurate and informative, Assistant 2's response was more comprehensive and detailed, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "HeJt7Awd8AwFwtLSybWTsX", "question_id": 61, "answer1_id": "Vzmhr5RMndkR866p4pUef6", "answer2_id": "nvbTnFLEyPSSgPdf2n6Mqp", "reviewer_id": 2, "metadata": {}, "text": "4 6\n\nAssistant 1:\nStrengths:\n- Polite and offers alternative assistance.\n\nWeaknesses:\n- Did not provide any code related to the problem statement.\n- Did not attempt to solve the given problem.\n\nSuggestions for improvement:\n- Attempt to solve the problem by providing code that addresses the problem statement.\n\nAssistant 2:\nStrengths:\n- Provided a code structure that reads a file line by line.\n- Included necessary header files and used appropriate error checking.\n\nWeaknesses:\n- The code does not count the occurrences of a specific word in the file.\n- The code is counting the number of lines in the file instead of the occurrences of a specific word.\n- The code uses C-style file handling instead of C++ style.\n\nSuggestions for improvement:\n- Implement a function to count the occurrences of a specific word in each line.\n- Use C++ style file handling (e.g., ifstream) instead of C-style (e.g., FILE, fopen).\n- Add comments to explain the logic and functionality of the code.\n\nHere's an improved version of the code:\n\n```cpp\n#include \n#include \n#include \n#include \nusing namespace std;\n\nint main() {\n ifstream file(\"filename.txt\");\n string line, word, target = \"specific_word\";\n int count = 0;\n\n if (!file.is_open()) {\n cerr << \"Error opening file\" << endl;\n return 1;\n }\n\n while (getline(file, line)) {\n stringstream ss(line);\n while (ss >> word) {\n if (word == target) {\n count++;\n }\n }\n }\n\n file.close();\n cout << \"The word '\" << target << \"' appears \" << count << \" times in the file.\" << endl;\n return 0;\n}\n```\n\nThis version uses C++ style file handling and counts the occurrences of a specific word in the file.", "score": [4.0, 6.0]}
+{"review_id": "EFiAY67ZjvQqPSWNiNo3Hb", "question_id": 62, "answer1_id": "jPrfhv9oaZGrkXxDUw5Vbt", "answer2_id": "RS2roGuiWf5jZfncgfHPeG", "reviewer_id": 2, "metadata": {}, "text": "7 4\n\nAssistant 1:\nStrengths:\n- The code is accurate and implements the given problem statement correctly.\n- The time and space complexity are provided, which is helpful for understanding the efficiency of the algorithm.\n- The code is well-structured and easy to read.\n\nWeaknesses:\n- The code lacks comments explaining the logic and functionality of the code.\n- The variable names could be more descriptive.\n\nSuggestions for improvement:\n- Add comments to explain the logic and functionality of the code.\n- Use more descriptive variable names, such as `length1` and `length2` instead of `m` and `n`.\n\nAssistant 2:\nStrengths:\n- The code is well-structured and easy to read.\n\nWeaknesses:\n- The code is incorrect and does not implement the given problem statement correctly. The initialization of the `dp` matrix is incorrect, and the LCS calculation logic is also incorrect.\n- The code lacks comments explaining the logic and functionality of the code.\n- The variable names could be more descriptive.\n\nSuggestions for improvement:\n- Correct the initialization of the `dp` matrix and the LCS calculation logic to match the correct dynamic programming approach.\n- Add comments to explain the logic and functionality of the code.\n- Use more descriptive variable names, such as `length1` and `length2` instead of `m` and `n`.", "score": [7.0, 4.0]}
+{"review_id": "nQrdMCWLQcCHLMJ5iqVm9Y", "question_id": 63, "answer1_id": "KdZNrf6udTfCxGGcQxRbXk", "answer2_id": "Rz8sSwJzffBjUhjEYAyEWS", "reviewer_id": 2, "metadata": {}, "text": "7.5 6.5\n\nAssistant 1:\nStrengths:\n- The code is clear and easy to understand.\n- The function is reusable and can be easily integrated into other code.\n- The regex pattern is mostly correct and covers most of the email address format requirements.\n\nWeaknesses:\n- There is a small typo in the regex pattern, which includes a space character in the username and domain parts. It should be removed.\n- The top-level domain is limited to 2-3 characters, which may not cover all valid top-level domains.\n\nSuggestions for improvement:\n- Remove the space character from the regex pattern.\n- Consider extending the top-level domain character limit to cover more valid domains.\n\nAssistant 2:\nStrengths:\n- The code is easy to understand and includes user input.\n- The regex pattern is mostly correct and covers most of the email address format requirements.\n\nWeaknesses:\n- The variable name \"Email Addition\" is not a valid Python variable name and should be changed to a valid one, like \"email_pattern\".\n- The code does not use a function, which makes it less reusable and harder to integrate into other code.\n- The top-level domain is limited to only letters, which may not cover all valid top-level domains.\n- The code does not follow proper Python naming conventions for variables.\n\nSuggestions for improvement:\n- Change the variable name \"Email Addition\" to a valid Python variable name, like \"email_pattern\".\n- Encapsulate the code in a function for better reusability.\n- Consider extending the top-level domain character limit to cover more valid domains.\n- Follow proper Python naming conventions for variables.", "score": [7.5, 6.5]}
+{"review_id": "2xE9GfDfgjbrHydqTaP8XZ", "question_id": 64, "answer1_id": "AFzqAfbYVL25DBqFA2cv4G", "answer2_id": "SQCnPwtYgHH57ikbXgUpQq", "reviewer_id": 2, "metadata": {}, "text": "8 6\n\nAssistant 1:\nStrengths:\n- The code is clear and easy to understand.\n- The dynamic programming approach is implemented correctly using a table to store the Fibonacci numbers.\n- The comments are helpful and explain the code's logic and functionality.\n\nWeaknesses:\n- The code could be more efficient by using a memoization approach instead of a table.\n\nSuggestions for improvement:\n- Consider using a memoization approach to store the previously computed Fibonacci numbers, which can help reduce the space complexity of the solution.\n\nAssistant 2:\nStrengths:\n- The code attempts to use memoization to store the previously computed Fibonacci numbers.\n- The code is mostly clear and easy to understand.\n\nWeaknesses:\n- The code has a nested function with the same name as the outer function, which can be confusing.\n- The code does not properly implement memoization, as it does not store the computed Fibonacci numbers in the memo dictionary.\n- The comments are not as clear and concise as they could be, and they do not fully explain the code's logic and functionality.\n\nSuggestions for improvement:\n- Rename the nested function to avoid confusion.\n- Properly implement memoization by storing the computed Fibonacci numbers in the memo dictionary.\n- Improve the comments to better explain the code's logic and functionality.", "score": [8.0, 6.0]}
+{"review_id": "gSDDaoe9AF3NpK7NpT9WMr", "question_id": 65, "answer1_id": "Lt56JzWfXBxbvxrLf4fn3t", "answer2_id": "mfGSLxAAjjBmUFtCx3STEN", "reviewer_id": 2, "metadata": {}, "text": "8 9\n\nBoth Assistant 1 and Assistant 2 provided correct implementations of the binary search algorithm. However, there are some differences in their explanations and code structure.\n\nAssistant 1:\n- The explanation of the algorithm is clear and concise.\n- The code is accurate and efficient.\n- The function signature includes low and high parameters, which can be useful for recursive implementations but are not necessary for this iterative approach.\n- There are no comments in the code, which could make it harder for others to understand.\n\nAssistant 2:\n- The explanation of the algorithm is more detailed, providing a step-by-step implementation.\n- The code is accurate, efficient, and slightly more readable than Assistant 1's code.\n- The function signature is simpler, only requiring the array and target as parameters.\n- The example usage is helpful for understanding how to use the function.\n- There are no comments in the code, but the explanation provided compensates for this.\n\nSuggestions for improvement:\n- Both assistants should include comments in their code to explain the logic and functionality.\n- Assistant 1 could simplify the function signature to only require the array and target as parameters, similar to Assistant 2's implementation.\n- Assistant 2 could further improve the code readability by adding more whitespace and using more descriptive variable names.", "score": [8.0, 9.0]}
+{"review_id": "F9s6BCBkuk2psBezySuCmD", "question_id": 66, "answer1_id": "bmaShWUyEqPE6kbHD6FjLk", "answer2_id": "CcUgvJProVma9sNKW2nKHc", "reviewer_id": 2, "metadata": {}, "text": "8 4\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The implementation is efficient and correctly solves the problem.\n- The use of two stacks for enqueue and dequeue operations is a good approach.\n- The code includes additional methods like __len__, __iter__, and __contains__ which can be useful.\n\nWeaknesses:\n- The code lacks comments explaining the logic and functionality of the methods.\n\nSuggestions for improvement:\n- Add comments to explain the logic and functionality of each method.\n\nAssistant 2:\nStrengths:\n- The code is readable and easy to understand.\n\nWeaknesses:\n- The implementation is incorrect and does not solve the problem as intended.\n- The dequeue method does not return the dequeued element.\n- The peek method is not necessary for the problem statement.\n- The assumptions mentioned are not relevant to the problem statement.\n- The code lacks comments explaining the logic and functionality of the methods.\n\nSuggestions for improvement:\n- Correct the dequeue method to return the dequeued element and transfer elements from the in_stack to the out_stack when necessary.\n- Remove the peek method and unnecessary assumptions.\n- Add comments to explain the logic and functionality of each method.", "score": [8.0, 4.0]}
+{"review_id": "ndC8uGt7pHV2AxogEV9BRK", "question_id": 67, "answer1_id": "hiSVom5YqSrKZKor8CjHdU", "answer2_id": "a4NeYaavwpgqC46erF3VFP", "reviewer_id": 2, "metadata": {}, "text": "7 5\n\nAssistant 1:\nStrengths:\n- The code is written in Python, which is easy to read and understand.\n- The function has a clear docstring that explains its purpose, arguments, and return value.\n- The code is concise and straightforward.\n\nWeaknesses:\n- The problem statement specifically asked not to use any extra data structures, but the code uses a set to store the common elements.\n- The algorithm has a time complexity of O(n^2), which can be slow for large arrays.\n\nSuggestions for improvement:\n- To avoid using extra data structures, you can modify the code to print the common elements directly instead of storing them in a set.\n- You can optimize the algorithm by sorting both arrays first and then using a two-pointer approach to find the common elements, which would reduce the time complexity to O(n*log(n)).\n\nAssistant 2:\nStrengths:\n- The code is written in C++, which is a widely-used programming language.\n- The code attempts to solve the problem without using extra data structures.\n\nWeaknesses:\n- The code is not well-structured and lacks proper indentation, making it difficult to read and understand.\n- The code does not include any comments to explain its logic and functionality.\n- The code uses two additional arrays (common and second) to store the common elements, which goes against the problem statement's requirement of not using extra data structures.\n- The code uses the min function from the cmath library, which is unnecessary and can be replaced with a simple comparison.\n\nSuggestions for improvement:\n- Improve the code structure and indentation to make it more readable.\n- Add comments to explain the code's logic and functionality.\n- Instead of using additional arrays to store the common elements, print them directly as they are found.\n- Replace the min function with a simple comparison to determine the smaller array size.", "score": [7.0, 5.0]}
+{"review_id": "XGWbx4ScB8UifedHVFmPuM", "question_id": 68, "answer1_id": "JRHeURg4TrVAgWNp5yqjFx", "answer2_id": "bpcHYkjjXamp998ARdtfWK", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nf(2) = 5(2)^3 - 2(2) + 3 = 40 - 4 + 3 = 39\nThe answer and the problem-solving steps are correct.\n\nAssistant 2:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(4) - 2 + 3 (This step is incorrect, as (2)^3 should be 8, not 4)\nf(2) = 20 - 2 + 3\nf(2) = 19\nThe answer is incorrect, and there is an error in the problem-solving steps.\n\nScores:\nAssistant 1: 10\nAssistant 2: 4\n\nAssistant 1 performed better, as they provided the correct answer and problem-solving steps, while Assistant 2 made an error in their calculations.", "score": [10, 4]}
+{"review_id": "ZuyaJ5ZhSqGypMuy8Em8u9", "question_id": 69, "answer1_id": "hZwFKeRYBLKihBEWSEGMYJ", "answer2_id": "SququGaG2Y8euqGVwV2Ueb", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n\nDistribute the 5 on the right-hand side of the equation:\n3x + 10 = 5x - 10\n\nSubtract 3x from both sides:\n10 = 2x - 10\n\nAdd 10 to both sides:\n20 = 2x\n\nDivide both sides by 2:\nx = 10\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2.\n\nAssistant 1:\nThe steps are mostly correct, but there is a mistake in the final step. The assistant divided both sides by -2 instead of 2, which led to an incorrect answer of x = 5.\n\nAssistant 2:\nThe steps are correct, and the assistant arrived at the correct answer of x = 10.\n\nScores:\nAssistant 1: 7 (due to the mistake in the final step)\nAssistant 2: 10 (correct steps and answer)\n\nAssistant 2 performed better in this case.", "score": [7, 10]}
+{"review_id": "GM6H7ZLAvVQBcELrRaDawz", "question_id": 70, "answer1_id": "iG4SJq9xCKCDDtiqU3szJZ", "answer2_id": "9Zb7Fenxop3RQFUrxWr5Hn", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nThe endpoints of the line segment are (2, -2) and (10, 4). To find the length of the line segment, we can use the distance formula:\n\n$$d = \\sqrt{(x_2 - x_1)^2 + (y_2 - y_1)^2}$$\n\nwhere $(x_1, y_1) = (2, -2)$ and $(x_2, y_2) = (10, 4)$. Substituting these values into the distance formula, we get:\n\n$$d = \\sqrt{(10 - 2)^2 + (4 - (-2))^2} = \\sqrt{8^2 + 6^2} = \\sqrt{100} = 10$$\n\nTherefore, the length of the line segment is 10 units.\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe problem-solving process is correct, and the final answer is also correct. The assistant used the distance formula and substituted the given coordinates to find the length of the line segment.\n\nAssistant 2:\nThe problem-solving process is incorrect. The assistant tried to use the Pythagorean theorem but made a mistake in calculating the lengths of the legs of the right triangle. The correct lengths should be 8 and 6, not 2 and 4. This led to an incorrect final answer.\n\nScores:\nAssistant 1: 10\nAssistant 2: 3\n\nAssistant 1 performed better in this case, as they used the correct method and arrived at the correct answer. Assistant 2 made a mistake in calculating the lengths of the legs of the right triangle, leading to an incorrect final answer.", "score": [10, 3]}
+{"review_id": "SArRaNeTcYzqD9W6jkfRMU", "question_id": 71, "answer1_id": "CjhV8cJ6UEp7Y4f3eWB6E5", "answer2_id": "HkYsYvcMNHvt5suAZJpFWw", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request. They each offered a well-written and detailed formal email template that the user can use to propose a joint venture to a potential business partner. Both templates included key elements such as the benefits of the joint venture, the strengths of the companies, and a call to action for further discussion. The quality of both responses is very similar, and both assistants deserve a high score of 9.", "score": [9.0, 9.0]}
+{"review_id": "Xf5sgDVZSRoJCEQrk6MCK3", "question_id": 72, "answer1_id": "5waLH9pZsr8jVTpy67QH58", "answer2_id": "F45reLGER4jrAjZZi5r58x", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate resignation letter templates that express gratitude and leave on good terms. Both letters are detailed and well-structured, making it easy for the user to personalize and use them. The quality of the answers is very similar, and both assistants deserve a high score for their performance.", "score": [9.0, 9.0]}
+{"review_id": "h78pPDfVNGWaYtiqLx2upD", "question_id": 73, "answer1_id": "ZV4sUNujQGS2w7Wg4jbXbf", "answer2_id": "ZLx4JuCtwNcv55nPbyXQvB", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. Assistant 1's response was concise and included all the necessary elements of a recommendation letter. However, Assistant 2's response was more detailed, providing a more comprehensive structure for a formal letter, including the sender's and recipient's addresses, date, and a more personalized approach to describing the student's achievements and skills. This additional detail and formatting make Assistant 2's response slightly better, earning it a higher score.", "score": [9.0, 10.0]}
+{"review_id": "fmxKrvuZ9F5SpyQZX5To9E", "question_id": 74, "answer1_id": "X94xm9m7tayWsKFNACqAG4", "answer2_id": "XPgHiaRwa5ykfKMZG7GQ6b", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth assistants provided relevant and helpful responses. Assistant 1's response was concise and to the point, but it lacked some details and personalization that would make the email more compelling. Assistant 2's response was more detailed, providing a list of key features and benefits, as well as mentioning special introductory pricing. This made Assistant 2's response more engaging and informative, which is why it receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "RRUsq4YJdrKM7fJEAhu7Kb", "question_id": 75, "answer1_id": "EFL7fU6gnicQY6DRpHvbTK", "answer2_id": "MEWrWkY8Y5CSBTCNkfo2nx", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. They both drafted apology emails addressing the delay in the customer's order and reassured that the issue has been resolved. Assistant 1's response was slightly more precise and provided a specific date for when the customer was expecting their order, which made the response feel more personalized. Assistant 2's response was also well-written and addressed the issue, but it lacked the specific date detail. Overall, both responses were of high quality, but Assistant 1's response was slightly more detailed and precise.", "score": [9.0, 8.5]}
+{"review_id": "CbEq8KYuePtWhVxuRESMMx", "question_id": 76, "answer1_id": "XJLFjCr6vgQZjEXbaMjTa6", "answer2_id": "f8Jmezvy4SwqgSVeWQqTzk", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided relevant and accurate information about the history and cultural significance of jazz. Assistant 1's response was more focused on the historical aspects and the roots of jazz, while Assistant 2's response was formatted as a YouTube script and included more details about the impact of jazz on popular culture and its role as a bridge between cultures. Assistant 2's response was more engaging and better suited for a YouTube video, which is why it received a higher score. However, both responses were helpful and informative.", "score": [8.0, 9.0]}
+{"review_id": "hjqQSpdX4PtcEdHERLZ4wW", "question_id": 77, "answer1_id": "bz3rS9MRs9QmaLGg4A8a7B", "answer2_id": "bZVCu7vDL4GGm5P8zN3xou", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a helpful and accurate response, highlighting cultural experiences and must-see attractions in Hawaii. However, the response lacked the engaging and personal touch that is typically found in a travel blog post. Assistant 2, on the other hand, crafted an engaging and immersive travel blog post that not only included cultural experiences and must-see attractions but also shared personal experiences and emotions. The vivid descriptions and storytelling in Assistant 2's response made it more enjoyable to read and better suited for a travel blog post.", "score": [8.0, 10.0]}
+{"review_id": "5KzUfjH75yTCWbXYhCFGQG", "question_id": 78, "answer1_id": "kCS2ZqzKTdDpaXwu3TiFXP", "answer2_id": "gYkH3Pit5J9gnJyXJtoHUG", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided captivating movie reviews for recently released science fiction films, discussing the plot, characters, and special effects in detail. Assistant 1 focused on the movie \"Dune,\" while Assistant 2 reviewed a film called \"Beyond the Stars.\" Both assistants provided well-structured reviews, highlighting the strengths of the films and giving specific examples of the special effects. The character descriptions were also well-developed, and both assistants mentioned the performances of the actors. Overall, both responses were helpful, relevant, accurate, and detailed, deserving a score of 9 each.", "score": [9.0, 9.0]}
+{"review_id": "nE34Y69pukUf9UBqf8WMDt", "question_id": 79, "answer1_id": "7WWWS5UhWcVzkhTypqcFU4", "answer2_id": "dX4Pt6xQpstrrnZgRLSVnN", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided relevant and detailed podcast scripts discussing the influence of streaming platforms on the music industry. Assistant 1's script was slightly more structured, with a clear introduction, body, and conclusion, as well as addressing both the pros and cons of streaming platforms. Assistant 2's script was also well-structured, but it was divided into segments, which made it feel a bit less cohesive. Both scripts were accurate and covered important aspects of the topic, but Assistant 1's script had a slightly better flow and organization, which is why it received a higher score.", "score": [9.0, 8.5]}
+{"review_id": "kadxGHJJu3J9xqPtbUTUgR", "question_id": 80, "answer1_id": "QcSFqbgqLWqn96V6BvWT7B", "answer2_id": "jseJYZmd8fBnzbx2EN9oq9", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided detailed and engaging symphony concert reviews, discussing the orchestra's performance and overall audience experience. Assistant 1's response was slightly more helpful and relevant, as it mentioned specific symphonies and the audience's reaction to the performance. Assistant 2 also provided a well-written review, but it focused more on the technical aspects of the performance and the conductor's insights. Both responses were accurate and detailed, but Assistant 1's response was more in line with the user's request, hence the slightly higher score.", "score": [9.0, 8.5]}
diff --git a/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_gpt35_vicuna-13b-20230322-new-hp-fp16.jsonl b/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_gpt35_vicuna-13b-20230322-new-hp-fp16.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..82174db4253a99eb3b9c93865dcb42084807d404
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_gpt35_vicuna-13b-20230322-new-hp-fp16.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "4sGnoyKdy9Ld7oLLS74zSK", "question_id": 1, "answer1_id": "BZGowHM7L3RvtWRktKZjLT", "answer2_id": "FNAYdXQwcrNkTD4nCqdFGb", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about improving time management skills. Assistant 1's response was slightly more detailed, with a clear list of tips and a brief explanation for each tip. Assistant 2's response was also helpful and provided a similar list of tips, but with slightly less detail and explanation. Both assistants covered important aspects of time management, such as setting goals, prioritizing tasks, minimizing distractions, and taking breaks. Overall, both responses were of high quality, but Assistant 1's answer was slightly more comprehensive and detailed.", "score": [9.0, 8.5]}
+{"review_id": "EbXWcvD4DdR7V4cPsQGBbR", "question_id": 2, "answer1_id": "GsrPLfiNEcRDYAC5aUoY23", "answer2_id": "7SzYV3TjzyrVkSKKGaXN4c", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both listed effective ways to deal with stress, including exercise, relaxation techniques, sleep, social support, healthy diet, and time management. Both assistants also mentioned seeking professional help if stress becomes overwhelming. The level of detail in both answers is sufficient to provide a clear understanding of the suggested methods for dealing with stress. The slight differences in the answers do not significantly impact the overall quality, and both responses are equally valuable.", "score": [9.0, 9.0]}
+{"review_id": "dudKwV2CUDsdZoSkN7dApr", "question_id": 3, "answer1_id": "5SGfexmPoJTWoVbPMB87KB", "answer2_id": "cA7LoXTBCgegnTcKWpjxQq", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more accurate and detailed response, correctly identifying that JavaScript is a multi-paradigm language and explaining the differences in syntax, typing, and application areas. Assistant 2 made some incorrect statements, such as claiming that Python is a procedural language and JavaScript is a compiled language. Assistant 1's response was more helpful and relevant, while Assistant 2's response contained some inaccuracies and less detail.", "score": [8.0, 6.0]}
+{"review_id": "Xd5RgwK635uZ4QgdWGThB9", "question_id": 4, "answer1_id": "RcqQg364kgCkYPshqvmGSp", "answer2_id": "8Yo5UVMPzoxjCsi3fuHNmH", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate tips for increasing productivity while working from home. Assistant 1 provided a concise list of 8 tips, while Assistant 2 provided a more detailed list of 10 tips. Assistant 2's response included additional tips on exercise, diet, and mental health, which contributed to a more comprehensive answer. Therefore, Assistant 1 receives a score of 9, and Assistant 2 receives a score of 10.", "score": [9.0, 10.0]}
+{"review_id": "HtraHoEHidJvZk9perhNRN", "question_id": 5, "answer1_id": "3R9jAndzLHQqJwmMi5mNox", "answer2_id": "cCc7EgnQNgcVAVqk96Pyto", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed explanations of the basics of quantum computing. They both covered the essential concepts, such as qubits, superposition, entanglement, and the potential applications of quantum computing. The explanations were clear and easy to understand, making both responses equally valuable for someone looking to learn about quantum computing.", "score": [9.0, 9.0]}
+{"review_id": "BxJK5LhYzKR8SFnKgG59Hr", "question_id": 6, "answer1_id": "Uw8SaYLPMGR5sdV9FDx8pb", "answer2_id": "TUL5M2TWXQmM56U4Ckk4s4", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the differences between plant-based and animal-based protein sources. Assistant 1 provided a good overview of the nutritional differences and environmental impact, while Assistant 2 went into more detail by discussing sustainability, health benefits, ethical considerations, taste, and allergies. Assistant 2's response was slightly more comprehensive, which is why it received a higher score. However, both responses were informative and useful.", "score": [8.0, 9.0]}
+{"review_id": "6oevuGS97XVCMyV5Rr7KWS", "question_id": 7, "answer1_id": "53gmokt2KBgKu6NMPopxcu", "answer2_id": "iZUdysQf69MHQE9Lq76mii", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about developing critical thinking skills. They both offered practical tips and emphasized the importance of practice and self-reflection. The level of detail in both responses was sufficient, with Assistant 1 providing a numbered list of tips, while Assistant 2 used a more narrative approach. Both answers covered similar points, such as asking questions, analyzing information, considering different perspectives, and engaging with diverse sources. Overall, both assistants performed exceptionally well, and it is difficult to differentiate their performance.", "score": [9.0, 9.0]}
+{"review_id": "JupV8w7xVgxZgu62W28RuK", "question_id": 8, "answer1_id": "bKrbrGsN7zjKLvBk2h3tvo", "answer2_id": "D8EWgfq9yytMgQYybq3Rhq", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant, accurate, and detailed answers to the question about the major challenges faced by the education sector today. Assistant 1 listed seven challenges, while Assistant 2 listed ten challenges. Assistant 2's answer is slightly more comprehensive, covering additional challenges such as safety, employability, and globalization. Both answers are well-structured and easy to understand, but Assistant 2's response is more detailed and covers a broader range of issues, which is why it receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "7vHrpBgdW7jcPwmj8VkPZp", "question_id": 9, "answer1_id": "HEGL3aPUnNrdNtNt3XLDKi", "answer2_id": "5bzosDZ96t2p5qxeS7VAeA", "reviewer_id": 1, "metadata": {}, "text": "9 6\nAssistant 1 provided a more comprehensive and well-organized answer, covering personal, psychological, social, situational, and marketing mix factors that influence consumer behavior. The answer was relevant, accurate, and detailed. Assistant 2's response was less detailed and missed situational and marketing mix factors, which are important aspects of consumer behavior. Additionally, Assistant 2's response was incomplete, ending abruptly after mentioning economic factors.", "score": [9.0, 6.0]}
+{"review_id": "3JGrVkmbotwqLiByLKVMkn", "question_id": 10, "answer1_id": "W9zpMVa2cJUJW8B2uGMCJy", "answer2_id": "itBobRwhSZUnsWkUs5bVwX", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective strategies for conflict resolution in the workplace. They both covered similar key points, such as active listening, open communication, focusing on interests, brainstorming solutions, and seeking outside help if necessary. The level of detail in both responses is sufficient to provide a clear understanding of the strategies. The only difference is that Assistant 2 provided a few additional strategies, such as conflict resolution training, establishing ground rules, and documenting everything. However, both answers are equally valuable and informative, so they both receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "6FBRu9bDvLzb3pRxpLuQ8w", "question_id": 11, "answer1_id": "LacdmoweqKYGzt3aMBYjEa", "answer2_id": "iVTKdmUeTySryqEHhtgmkS", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in their responses. Assistant 1's response was slightly more organized and concise, making it easier to understand the key points. Assistant 2's response also covered the main points but included some repetition and less clear organization. Both assistants provided a good level of detail, but Assistant 1's response was more focused and better structured, which is why it receives a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "gEPnH2qcHSEBjtRoWgSGwo", "question_id": 12, "answer1_id": "JqVreebbPuNdjw8E8K4Ssf", "answer2_id": "7UgtosTTGx7bnRcQGxdDJo", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in response to the question. Assistant 1's response was more structured and organized, making it easier to understand and follow. Assistant 2's response covered similar points but was slightly less organized. Both assistants provided a good level of detail, but Assistant 1's response was more comprehensive, including employee training and awareness, which was not mentioned by Assistant 2. Overall, both responses were informative and useful, but Assistant 1's response was slightly better in terms of organization and comprehensiveness.", "score": [9.0, 8.5]}
+{"review_id": "ghnY2z3MtPPkXePtuVBqYu", "question_id": 13, "answer1_id": "hEMThhsN85Ud5X8xBv9BZJ", "answer2_id": "Yt5tqBrrKfcumuuuxf2qqM", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed and organized, which made it easier to understand the different aspects of fiscal and monetary policies. Assistant 2's response was also informative, but it was not as well-structured and contained some repetition. Overall, both assistants provided valuable information on how governments can use fiscal and monetary policies to combat economic recessions, but Assistant 1's response was more comprehensive and well-organized.", "score": [9.0, 8.0]}
+{"review_id": "2SHSqPQ2nmwVS4r6pUs5EJ", "question_id": 14, "answer1_id": "BvFV7sx53PAK5bNn89urFs", "answer2_id": "4pZ4Uy544Bc3K59fhbW7xj", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed, discussing the impact of language barriers on trust and the importance of language classes, cultural exchange programs, and sensitivity training. Assistant 2 also provided a good response, discussing the impact of language and cultural barriers on stereotypes and prejudices, and suggesting ways to overcome these barriers. However, Assistant 1's response was more comprehensive and provided a slightly clearer structure, which is why it received a higher score.", "score": [8.0, 7.5]}
+{"review_id": "348f9siRVxp9CKHyEU8GNr", "question_id": 15, "answer1_id": "dM5GHbLuPNfzUbBnJz6w7K", "answer2_id": "762peC97upw58WFQeLNoXZ", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 focused on a specific scenario involving AI-powered chatbots for patient triage and automating routine tasks, while Assistant 2 discussed AI applications in diagnosis, treatment planning, predictive analytics, and personalized patient care. Assistant 1's response was slightly more detailed and structured, which made it easier to understand the potential benefits of AI in healthcare delivery. Assistant 2's response was also informative and covered a broader range of AI applications, but it could have benefited from a more structured presentation. Overall, both responses were of high quality, but Assistant 1's response was slightly better in terms of detail and organization.", "score": [9.0, 8.5]}
+{"review_id": "RydtYUoJU288nHJAoQfDnn", "question_id": 16, "answer1_id": "BX7maaP5kGY6bBTLJRwkit", "answer2_id": "Yqfg2saKSNPauCS8YdsjdD", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the CRISPR-Cas9 gene editing technology, its potential applications, and ethical implications. Assistant 1 offered a slightly more detailed response, discussing the potential for eugenics, unintended consequences, and access and equity concerns. Assistant 2 provided a clear step-by-step explanation of the CRISPR-Cas9 process, which was helpful for understanding the technology. Both assistants addressed the ethical concerns, but Assistant 1's response was more comprehensive, which is why it receives a slightly higher score.", "score": [9.0, 8.0]}
+{"review_id": "GuUCS8BsqAfpEusRJw3SBi", "question_id": 17, "answer1_id": "STuX8oc7Gu3SN6EWzwpUpp", "answer2_id": "gKd2BPWp7HG9PTDdgS7HT8", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both explained how vaccinations work by introducing a harmless piece of a virus or bacteria to the body, which triggers the immune system to produce antibodies that provide protection against future infections. They also both explained the concept of herd immunity and its importance in protecting individuals who cannot be vaccinated and the community as a whole. The responses are equally informative and well-structured, making it difficult to differentiate between the two in terms of quality. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "2TeQavZEWDrskczRcpyKCw", "question_id": 18, "answer1_id": "TFUUXWS7yn2u2b4n7eM3ZB", "answer2_id": "CyZSkdkGWmjh8ZgxrQ2f66", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused more on the democratization of information and the responsibility of social media platforms to control the spread of misinformation. Assistant 2, on the other hand, provided a more balanced view of the positive and negative aspects of social media platforms, mentioning the ease of access to news and the impact on democratic processes. Assistant 2 also discussed the measures taken by social media platforms to address misinformation. While both answers were informative, Assistant 2's response was slightly more comprehensive and detailed, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "d6yeCBsh8woxXmDqYNpMuU", "question_id": 19, "answer1_id": "3yRq2XXPi83H7Rr5SZS9rE", "answer2_id": "LwA42YWwqvdkStLZEUbmm9", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 provided a clear explanation of cultural, social, and economic factors and their influence on food choices, as well as suggestions for promoting healthier diets. Assistant 2 also covered these factors and provided examples of how to leverage them for promoting healthier diets. However, Assistant 2's response was slightly more comprehensive, as it included additional examples of initiatives and a more holistic approach to addressing the issue.", "score": [8.0, 9.0]}
+{"review_id": "e3nbJAjRWiaK3PecFw6BZ8", "question_id": 20, "answer1_id": "Sw34dAwQPCfGGotwRwhvtv", "answer2_id": "ajfFPpHDdMiSp3PVfsUFbE", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed explanations of the process of natural selection and its contribution to the evolution and adaptation of species. Both assistants covered the key aspects of natural selection, such as genetic variation, survival, reproduction, and the emergence of new species. The explanations were clear and easy to understand, making both responses equally valuable in answering the user's question.", "score": [9.0, 9.0]}
+{"review_id": "RJR9JREyiQpmKnPrTvo2WD", "question_id": 21, "answer1_id": "cZw4Jw8Zyz6ZUy4WDsC6ta", "answer2_id": "3n8npKQKxgHEbEmf2K6AwF", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful responses to the question. Assistant 1's response was concise and covered the main aspects of introducing oneself as a medieval knight at a royal banquet. Assistant 2's response was more detailed, providing additional context about the knight's background and emphasizing the ideals of honor, bravery, and loyalty. Both responses were accurate and appropriate for the given scenario, but Assistant 2's response was slightly more engaging and informative, which is why it receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "NSV5iA2bQbTKw2aaGgXbnV", "question_id": 22, "answer1_id": "nj9y2HTWFGsD5B278ozm73", "answer2_id": "LfeqCy9vR3kCaJiQV4Fyqf", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. Assistant 1's response was more concise and focused on the excitement of the adventure, the teamwork required, and the ultimate goal of finding treasure. Assistant 2's response was also engaging, but it included a more detailed description of the potential rewards, the challenges faced, and the consequences of cowardice. While both responses were strong, Assistant 1's response was slightly more motivating and to the point, earning it a higher score.", "score": [9.0, 8.5]}
+{"review_id": "UsBZmEKiZofPZaRhCm6vdD", "question_id": 23, "answer1_id": "Ep9rLcNzEGhG7AgbLTpRtm", "answer2_id": "JrnFfmnsuykbTkFbUnei6k", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided relevant and creative soliloquies that capture the essence of a Shakespearean character declaring their love. Assistant 1's soliloquy is well-written and captures the passion and devotion of the character. Assistant 2's soliloquy is also well-crafted, with vivid imagery and a strong emotional appeal. However, Assistant 2's response is slightly more detailed and varied in its expression of love, which is why it receives a slightly higher score. Both responses are accurate in terms of language and style, and both provide a high level of detail in their respective soliloquies.", "score": [9.0, 10.0]}
+{"review_id": "Mg58xEBfbfebkpYrD7dsY9", "question_id": 24, "answer1_id": "oNULT72cYUvit7D9SHb5aM", "answer2_id": "BDBSC5P2DE3E5LuB9FX7tn", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both explained their origin stories in a way that a curious child could understand. Assistant 1's response was more concise, while Assistant 2's response was more detailed and engaging. Assistant 2's answer also emphasized the personal growth and realization of a bigger purpose, which added depth to the story. Therefore, Assistant 2 receives a slightly higher score due to the additional details and emotional connection.", "score": [8.0, 9.0]}
+{"review_id": "MihMn3BDkXm6LKMBawY3eK", "question_id": 25, "answer1_id": "TX86xjPKTk2UxWwV4e8zRK", "answer2_id": "TS97KLnyki7XWYFdSdbZiY", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the technological advancements in the year 3000. Assistant 1's response was slightly more detailed, with a focus on specific advancements such as quantum computing, renewable energy, and medical advancements. Assistant 2's response covered a broader range of topics, including virtual reality, food and water production, and climate change. However, Assistant 1's response seemed more precise and organized, which made it easier to understand and follow. Both assistants did a good job, but Assistant 1's response was slightly better in terms of detail and organization.", "score": [9.0, 8.5]}
+{"review_id": "7JHLvqcNq48ABNq9Srk9xC", "question_id": 26, "answer1_id": "e5YFb6PojDThkcZdSH8kpC", "answer2_id": "9ATk4tAxBoe267rQUDrgYD", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided a vivid description of the winning play in the final seconds of a championship game. Assistant 1 focused on a basketball scenario, while Assistant 2 described a football scenario. Assistant 1's response was detailed and captured the excitement and tension of the moment, but Assistant 2's response was more engaging and energetic, making the reader feel like they were actually listening to a sports commentator. Both responses were relevant and accurate, but Assistant 2's response was slightly more immersive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "SHgszDxaUbvnqeA69kmBNm", "question_id": 27, "answer1_id": "NnkbyoNFRHay4toKTFfaeE", "answer2_id": "C4zL3pUsGj2Z8U7Tm2nzDN", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided excellent responses to the user's question. They both described their signature dishes in great detail, highlighting the ingredients, cooking techniques, and flavors. Both responses were engaging and painted a vivid picture of the dishes, making it easy for the reader to imagine the taste and presentation. The responses were relevant, accurate, and detailed, showcasing the creativity and passion of a world-famous chef. It is difficult to choose one over the other, as both assistants performed exceptionally well in this task.", "score": [9.0, 9.0]}
+{"review_id": "4WXo4VNiNpUF7nyMMdTYug", "question_id": 28, "answer1_id": "Gpb8MuNU3Pt7k93dpRN9WM", "answer2_id": "CTi8ZRuHoAzRQjMRAU3mBN", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed information about the emotions and the view from the summit of Mount Everest. Assistant 1 gave a good overview of the challenges and the breathtaking view from the summit, but Assistant 2 provided a more vivid and engaging description of the emotions and the view, making it easier for the reader to imagine the experience. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "X6UxC87ZAxy6GLm3juT6gm", "question_id": 29, "answer1_id": "SYvkCCHBUZPd9DQuidZM8K", "answer2_id": "8chhW8gPzM6zHUN6VAqpym", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1's response was slightly more detailed, with a clear enumeration of the challenges faced by a space colonist on Mars. Assistant 1 also touched upon the mental health aspect, which is an important consideration in such an isolated environment. Assistant 2's response was also informative, but it lacked the same level of detail and organization as Assistant 1's response. Both assistants mentioned the importance of teamwork and the challenges of the Martian environment, but Assistant 1's response was more comprehensive and well-structured, which is why it received a slightly higher score.", "score": [8.0, 7.5]}
+{"review_id": "joCNHDFFdt8M2bgbWxiQ5j", "question_id": 30, "answer1_id": "NjdsG8tYfrHMT5zGZPavk6", "answer2_id": "Pxj7ZVsvQ9HMQnRVMh7os4", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and engaging responses to the user's question. They both created characters and described their survival strategies and allies in a post-apocalyptic world. Assistant 2's response was slightly more detailed, providing names for the character and allies, as well as mentioning the existence of a settlement and a makeshift society. This added depth to the story and made it more immersive. Assistant 1's response was also well-written and engaging, but it lacked some of the details that made Assistant 2's response stand out.", "score": [8.0, 9.0]}
+{"review_id": "YjVozfaJvqxptWvgeeWJAX", "question_id": 31, "answer1_id": "8eovAhyvrKJEMWiVdYzByH", "answer2_id": "NCUhSMqzESRJJVDKHQ7XPB", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided helpful and relevant information on how to determine if a restaurant is popular among locals or mainly attracts tourists. Assistant 1 gave a clear and concise answer, focusing on online reviews, observing clientele, and talking to staff. Assistant 2 provided additional indicators such as parking, reservation policy, menu prices, and local crowd demographics. Both assistants explained why this information might be useful, but Assistant 2 provided more detailed reasons and examples. Assistant 1's response was accurate and helpful, but Assistant 2's response was more comprehensive and detailed, which is why Assistant 2 received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "iKMVjFFnuvBvmzLLdwJm8h", "question_id": 32, "answer1_id": "nvyaGEveLWBaxgXzriB93d", "answer2_id": "mEvCEzVGnENJAMzs2ioTmp", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both listed subtle clues that suggest someone is pretending to understand a topic or conversation. Assistant 1 provided 7 clues, while Assistant 2 provided 10 clues. Both answers were detailed and covered different aspects of the question. The only minor difference is that Assistant 2 mentioned that not everyone who exhibits these behaviors is necessarily pretending to understand, which adds a bit of context to the answer. However, both responses are of high quality and deserve a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "ZTVefDxUgZctUAd2RXf9fE", "question_id": 33, "answer1_id": "3xU2t6Yvx9EWpqfqvinNfH", "answer2_id": "CoFnjLgW5Eens9JhnpiJHZ", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both listed reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app. Both assistants covered similar points, such as reliability, battery life, and personal preference. Assistant 1 mentioned the tactile experience and building a better understanding of the area, while Assistant 2 discussed privacy concerns. The level of detail in both responses was sufficient to address the question, and both assistants provided clear and concise explanations. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "KDAwF4A9C5zKJVmev8rCPX", "question_id": 34, "answer1_id": "Mq6hzNziUxzQ2juPMDrv3h", "answer2_id": "aQ9mM2wfmHSge2KvuQ8hmx", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more concise and organized, making it easier to follow and understand. Assistant 2's response was also informative, but it included an additional point about asking the person directly, which may not always be the best approach in every situation. Both assistants covered the importance of nonverbal cues, active listening, and participation in the conversation. Overall, both responses were detailed and useful, but Assistant 1's response was slightly more effective in its presentation.", "score": [9.0, 8.5]}
+{"review_id": "exVMrpvt2WbVCgqgdvfKkZ", "question_id": 35, "answer1_id": "KU6BNNN8d6MLHyrA8nV4DB", "answer2_id": "eM5S86H3bXTGLQcVW59XsD", "reviewer_id": 1, "metadata": {}, "text": "9 9.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. Assistant 1 listed five reasons, while Assistant 2 listed six reasons, with both assistants covering personalized service, supporting the local economy, unique products, and environmental impact. Assistant 2's response included an additional point about customer loyalty and preservation of heritage, which slightly sets it apart from Assistant 1. However, both responses were of high quality and provided valuable information to the user.", "score": [9.0, 9.5]}
+{"review_id": "S6HY7jsFssnxpuPtU4opTc", "question_id": 36, "answer1_id": "RpHbPLJamuknRRa3xU5bUF", "answer2_id": "MpBrYa9J2zQy9NGi2dvKp8", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information on assessing the credibility of a source of information. Assistant 1's response was slightly more detailed and organized, covering a wider range of factors to consider, such as evaluating the tone of the article and verifying the information using reputable sources. Assistant 2's response was also informative, but it had some overlap with Assistant 1's points and did not provide as much depth in certain areas, such as evaluating the tone and verifying the information. Overall, both responses were useful, but Assistant 1's answer was more comprehensive and well-structured.", "score": [9.0, 8.5]}
+{"review_id": "6NfD37suF7Bv3EaxoAg4fr", "question_id": 37, "answer1_id": "AFR3AJW4sSPLDLiAUvrL8s", "answer2_id": "i8QZzVJo2WkTLc7WMv4bNm", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more comprehensive, discussing the role of personality traits, past experiences, and cultural backgrounds in shaping individual preferences for fear-inducing experiences. Assistant 2's response was also informative, but it did not mention the role of cultural backgrounds. Both assistants mentioned the adrenaline rush and the unique nature of each person's response to fear. Overall, Assistant 1's answer was slightly more detailed and complete, which is why it received a higher score.", "score": [9.0, 8.5]}
+{"review_id": "PLuFKUMGaK2RLpLz2k4ute", "question_id": 38, "answer1_id": "esqiBYHa56ygcPU2ux2Pdx", "answer2_id": "HP2CYZ3HJWMcGp6QF9qbq6", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed responses to the question. They both covered key aspects of observing social behavior to understand cultural norms and expectations, such as dress codes, communication styles, social hierarchy, and customs. Assistant 1 provided a more structured response with numbered points, while Assistant 2 elaborated on the role of culture in shaping social behavior. Both responses complement each other and offer valuable insights, making it difficult to differentiate their overall performance. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "aKqyfTWErWpvn7QcbmZQbG", "question_id": 39, "answer1_id": "NmuuKUipqt62QKuEHCuBWh", "answer2_id": "XrMYXEWHff6jXjjJWquXfW", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided relevant and thoughtful answers to the question, acknowledging the complexity and subjectivity of the issue. Assistant 1's response was slightly more detailed and organized, discussing the potential benefits of space exploration and the importance of addressing Earth's problems. Assistant 1 also emphasized the possibility of balancing both priorities. Assistant 2's response was similar but slightly less detailed, and while it also mentioned the importance of both goals, it did not explicitly mention the possibility of balancing them. Overall, both assistants provided helpful and accurate answers, but Assistant 1's response was slightly more comprehensive.", "score": [8.0, 7.5]}
+{"review_id": "7LNXSnq6KevpWP2cAnRZb9", "question_id": 40, "answer1_id": "3HypDqXt6tHieMDN7hWYCh", "answer2_id": "eeS6qNLeX6AScyGrS5pwrc", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both emphasized the importance of striking a balance between job creation and technological progress. Assistant 1 provided a concise response, mentioning the benefits of technological progress and the importance of job creation. They also suggested some methods for promoting job creation. Assistant 2, on the other hand, provided a more detailed response, discussing the importance of both job creation and technological progress, and elaborating on how to achieve a balance between the two. Assistant 2's answer also touched on the importance of social equity, which added an extra layer of depth to their response. Overall, both assistants performed well, but Assistant 2's response was slightly more detailed and comprehensive, earning them a higher score.", "score": [8.0, 9.0]}
+{"review_id": "Ggbf9HijdGrKFbdoaHELrN", "question_id": 41, "answer1_id": "DmQtupeyNDrQFBccBRAsbD", "answer2_id": "an76qPuSJ4TQDWvNXdcJ2w", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more detailed and step-by-step explanation of the calculation, which made it easier to understand the reasoning behind the answer. They also acknowledged that the answer is an estimate and that factors such as age, health, and environment can affect blinking frequency. Assistant 2, on the other hand, provided a more general response without a clear calculation or estimate for the number of blinks in a lifetime. However, they did mention some factors that can influence blinking frequency and the importance of blinking for eye health. Both assistants provided relevant and accurate information, but Assistant 1's response was more helpful and precise in answering the question.", "score": [8.0, 7.0]}
+{"review_id": "naYxpEbKkwe3cQLFN6peRn", "question_id": 42, "answer1_id": "froHv7kwRMYGWPXDQXk2Gw", "answer2_id": "DYnmt5xTTTkLuyyBxhmDsq", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more accurate and detailed response, using the average weight of a grain of salt (58.5 milligrams) from the National Institute of Standards and Technology (NIST) as a basis for the calculation. The explanation was clear, step-by-step, and easy to follow. Assistant 2, on the other hand, made some incorrect assumptions, such as the diameter of a grain of salt and the mass range (100-300 mg), which is significantly larger than the NIST's average weight. Additionally, Assistant 2's calculation method was less clear and more difficult to follow. Both assistants provided relevant information, but Assistant 1's response was more accurate, detailed, and easier to understand.", "score": [9.0, 7.0]}
+{"review_id": "fGBJ5j5nSGP9fiuSQ8kwP5", "question_id": 43, "answer1_id": "ahktv9NqxZ2cYquTXwF42r", "answer2_id": "MbcVvnW3Eh9wZi34SzYawn", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and step-by-step explanation of the reasoning behind the estimation of lightning strikes per day. The answer was well-structured and used relevant sources such as the World Meteorological Organization and National Geographic. Assistant 2's response was less focused on providing a specific number and more on discussing the factors that influence lightning activity. While both answers were informative, Assistant 1's response was more helpful and precise in addressing the user's question. Assistant 2's response, although informative, did not provide a clear estimate of daily lightning strikes, which was the main focus of the question.", "score": [8.0, 6.0]}
+{"review_id": "4PZWq27vqBv6Thczjv8fnm", "question_id": 44, "answer1_id": "kqqPRaFqb3w9Ky9LGB3yKU", "answer2_id": "4oSKRo3DtcNtE8gRbrSJPg", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and step-by-step explanation of the calculations involved in estimating the number of balloons needed to lift a house like in the movie \"Up.\" The response considered the weight of the house, the lifting capacity of a single balloon, and the total lifting capacity needed. It also mentioned the limitations and challenges of such a scenario, making it clear that it is purely fictional and not feasible in real life.\n\nAssistant 2, on the other hand, provided a more general explanation of the principles involved in lifting a house with balloons, such as buoyancy and volume. The response did attempt to estimate the number of balloons needed, but the calculation was based on the volume of the house rather than its weight, which is less accurate. Additionally, the response did not provide as much detail on the limitations and challenges of the scenario. Overall, Assistant 2's response was still relevant and informative, but not as precise and detailed as Assistant 1's response.", "score": [9.0, 7.0]}
+{"review_id": "9rvayARouuZpxUa6G8gv32", "question_id": 45, "answer1_id": "946tQg8kS7GYPSm4qcV6Pt", "answer2_id": "bhh9SFsYgquUPeC2qBKMJw", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information in their responses. Assistant 1 gave a clear step-by-step calculation of the number of text messages sent per minute, arriving at an estimate of approximately 13,017,543 text messages. They also mentioned that the number may vary depending on factors such as time of day and the growth of instant messaging apps. Assistant 2, on the other hand, provided a more detailed explanation of their reasoning, considering factors such as mobile usage distribution across different regions and the number of mobile cellular subscriptions worldwide. They arrived at an estimate of around 16 million text messages sent per minute. Assistant 2's response was more comprehensive and took into account additional factors, which is why they received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "HE8qyLRLDkGy5oMCBJWmXY", "question_id": 46, "answer1_id": "cU3wut3Ta3ySbRHGxfwgjc", "answer2_id": "oKMFEsBBp4SEJiVPRQZN2N", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and step-by-step explanation of the estimation process, using a specific average daily word count per person (2,000 words) and the world population to calculate the total words spoken daily. The response also acknowledged the limitations and assumptions made in the estimate, making it more transparent and informative. Assistant 2, on the other hand, provided a less precise estimation and did not offer a clear calculation or specific numbers. While Assistant 2 mentioned some relevant factors, the response lacked the clarity and detail provided by Assistant 1.", "score": [9.0, 7.0]}
+{"review_id": "HzX34MFhFV2hufs9NDApoe", "question_id": 47, "answer1_id": "hQP784Ch2yq2b3BaXVBVX3", "answer2_id": "XXXVQo5w5XCFiEuK6n3hUV", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more structured and step-by-step explanation, considering the size of snowflakes, the measurement of snowfall, and using the average snowfall in the United States to give an estimate of the number of snowflakes. Assistant 2 also provided relevant information, but the response was less structured and focused more on the snowfall rate and assumptions. Both assistants acknowledged the difficulty in providing an exact number and gave estimates, but Assistant 1's response was clearer and more detailed.", "score": [8.0, 7.0]}
+{"review_id": "mWBfsHp94NcK3TvF7zKE9d", "question_id": 48, "answer1_id": "a92bStUFdq4LBcv3pa9y3Z", "answer2_id": "Ysbx8mFxYva6TbHQGKL8K7", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and well-structured response, estimating the total number of pages in all books ever written based on the number of books and average pages per book. The explanation was clear and easy to follow, acknowledging the limitations of the estimation. Assistant 2, on the other hand, provided a less detailed response and used a different approach to estimate the number of pages, which was less convincing. Additionally, Assistant 2's response contained some repetition and did not provide a final estimation for the total number of pages.", "score": [8.0, 6.0]}
+{"review_id": "BhFnJdfXCZQnpoUkBNG7kP", "question_id": 49, "answer1_id": "a2QAcAm9wJeP2BpyWQnhot", "answer2_id": "KenbYuKVVUVXe2stVtxkeF", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a clear and concise answer, explaining the reasoning step-by-step, and acknowledging the limitations of the estimate. Assistant 2, while providing some interesting context, made an error in the calculation and provided a less focused answer. Assistant 1's response was more relevant and accurate, while Assistant 2's response included additional details that were not directly related to the question.", "score": [8.0, 6.0]}
+{"review_id": "WKtrrcofJCfXeoVwg6S6P6", "question_id": 50, "answer1_id": "CrmusnxbTtGXF2varfcUd4", "answer2_id": "ceWTK9jYWZq2Dd2H7KGkWY", "reviewer_id": 1, "metadata": {}, "text": "8 7\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed, providing specific examples such as the bone flute and the Library of Congress collection. Assistant 2's response, while still informative, was less specific and focused more on the impact of music on human culture. Both answers acknowledged the difficulty in estimating the exact number of songs recorded throughout history, but Assistant 1's response was more precise in its conclusion, estimating the number to be in the billions.", "score": [8.0, 7.0]}
+{"review_id": "8Wk7SFF5HCUsnznwqUdVJy", "question_id": 51, "answer1_id": "J9pZp6z2UUW7YcXgzUouqs", "answer2_id": "6Bbzs6YWyzPj52rZAfRPTt", "reviewer_id": 1, "metadata": {}, "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and accurate responses to the question. Assistant 1 gave a more detailed response, discussing the potential impact of the internet on the intellectual exchange, collaboration, and democratization of knowledge during the Renaissance period. Assistant 2, on the other hand, focused more on the possible limitations and differences in the internet's development and usage during that time. While both responses were helpful, Assistant 1's answer was more comprehensive and provided a clearer picture of the potential impact of the internet during the Renaissance period, which is why it received a slightly higher score.", "score": [8.0, 7.0]}
+{"review_id": "SjnCokwZEkzZ7WNJxn8wWQ", "question_id": 52, "answer1_id": "67bYUQb6zru8ofiub7uNUi", "answer2_id": "EG6h5jgpQ8wesKAX43nt9X", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a good overview of the potential consequences of the Aztecs repelling the Spanish conquistadors, mentioning the impact on the Americas, Europe, and the world. Assistant 2, however, provided a more detailed response by outlining five possible scenarios that could have occurred if the Aztecs had successfully repelled the Spanish. This additional detail and organization make Assistant 2's answer slightly better, but both responses are informative and valuable.", "score": [8.0, 9.0]}
+{"review_id": "PcEjxGYhjEZ462USNCxPU9", "question_id": 53, "answer1_id": "gAisnQTHWFLW8aa5fQPNJf", "answer2_id": "J76ZgE27N2Pe77NcEBrWUV", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant, accurate, and detailed responses to the question about the potential consequences if the Black Death had not occurred in the 14th century. Assistant 1 focused on the social hierarchy, economic opportunities, medical advancements, and cultural landscape. Assistant 2, on the other hand, provided a more structured response, discussing the potential consequences in terms of demographics, economy, politics, social structures, and medical knowledge. Assistant 2's response was slightly more comprehensive and organized, which is why it received a higher score. However, both responses were helpful and informative.", "score": [8.0, 9.0]}
+{"review_id": "cSjEAvNEzkJrp3qz6VDNaQ", "question_id": 54, "answer1_id": "4ZJCbj7T8BGzNhDqz7NSF4", "answer2_id": "hoFFABFFF7J8juv3AnoVWZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the potential impact of Newton's focus on biology and how it could have affected the scientific world, while Assistant 2 provided more specific examples of areas in biology where Newton might have made significant contributions. Assistant 2's answer was more detailed and provided a broader range of possibilities, which is why it received a slightly higher score. Both answers acknowledged the difficulty in predicting the exact outcomes of such a hypothetical scenario.", "score": [8.0, 9.0]}
+{"review_id": "H7CvzCQK9vAVZtgJvx3L28", "question_id": 55, "answer1_id": "c6ixri3qqLfSBBnwMkgYB7", "answer2_id": "8R5n2uZ4oq52QkWQsSS6No", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided helpful and relevant answers to the question. Assistant 1 focused on the overall impact of the Beatles on music and society, mentioning that their influence would continue to be felt for generations. Assistant 2, however, went into more detail by providing seven possible scenarios of how the music industry and popular culture might have been affected without the Beatles. This level of detail and consideration of various aspects of the music industry makes Assistant 2's response more comprehensive and informative, earning it a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "extp2B6LmhaSTD3wyuYxZg", "question_id": 56, "answer1_id": "c9AtDn7eeSYhtH854MQDDB", "answer2_id": "ECtu3QVXVrNhssSyktKzkq", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information regarding the potential consequences of Alan Turing not cracking the Enigma code during World War II. Assistant 1 provided a slightly more detailed response, discussing the impact on the development of computer technology and artificial intelligence. Assistant 2, however, mentioned the importance of considering other factors in the outcome of the war, which adds nuance to the answer. Overall, both assistants provided helpful and precise information, but Assistant 1's response was slightly more comprehensive.", "score": [8.0, 7.5]}
+{"review_id": "GxvqSP4mHCLeT76uEytfGH", "question_id": 57, "answer1_id": "jYd2gg6MJH8hdqFSAJTaiR", "answer2_id": "jb8ot3ucdF3RvzApEYKft6", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both discussed the impact on shipping routes, international trade, and the economic consequences of not having the Suez Canal. They also touched upon the potential changes in the political landscape and the development of the region. Both answers were detailed and informative, making it difficult to distinguish one as superior to the other. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "HjrxA4V5SkGpNN7LpMVhws", "question_id": 58, "answer1_id": "nZJ6LGJFegnHetutiAQtFm", "answer2_id": "cAjR8aDWRKVKbxKeqRRgvW", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about the potential consequences if the Maya civilization had never mysteriously collapsed. Assistant 1 focused on the possible advancements in knowledge, governance, and political systems, as well as the potential impact on the colonization of the Americas. Assistant 2, on the other hand, provided a more detailed list of possible scenarios, including the continuation of the Maya's thriving culture, political stability, contact with other civilizations, cultural differences, and environmental impacts. While both answers were informative, Assistant 2's response was slightly more detailed and organized, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "CsmEaRsMMNeNeuGhHvnv6y", "question_id": 59, "answer1_id": "dmEgLyeYNcwBZWHBak6Lap", "answer2_id": "7hKzFRQWWPVbr8H4wcbGdA", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1 discussed the possibility of other explorers discovering the Americas and the potential impact on native populations and European colonization. Assistant 2 focused more on the indigenous peoples and the consequences of Columbus's arrival, such as the introduction of diseases and the displacement of native populations. Assistant 2's answer was slightly more detailed and provided a clearer picture of the potential alternate history without Columbus's discovery, which is why it receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "9gvTBtmLarixJzvV94ibC8", "question_id": 60, "answer1_id": "bkuECkuy7YiDUcj9oJjdrZ", "answer2_id": "3vMtYzSrV5ARpYA9fGUe4H", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1's response was slightly more detailed, discussing the potential impact on van Gogh's personal life, mental health, and the broader art world. Assistant 2 also provided a good response, but it was more focused on listing possible outcomes rather than elaborating on them. Both assistants acknowledged the speculative nature of the question, but Assistant 1's response felt more comprehensive and engaging.", "score": [8.0, 7.5]}
+{"review_id": "gHdZUgARyU5LTdU9MiLZDe", "question_id": 61, "answer1_id": "2PRjT7j3V6487xZREfQfuD", "answer2_id": "nvbTnFLEyPSSgPdf2n6Mqp", "reviewer_id": 2, "metadata": {}, "text": "8 4\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The function count_occurrences() is modular and can be reused.\n- The code handles file opening errors.\n- The code converts the text to lowercase to ensure accurate word matching.\n- The code uses stringstream to process words in each line.\n\nWeaknesses:\n- The code does not remove punctuation from the words, which may lead to inaccurate word matching.\n\nSuggestions for improvement:\n- Add code to remove punctuation from the words before comparing them to the search_word.\n\nAssistant 2:\nStrengths:\n- The code is simple and easy to understand.\n\nWeaknesses:\n- The code does not solve the given problem statement. It only counts the number of lines in the file, not the occurrences of a specific word.\n- The code uses C-style file handling (fopen, fgets, fclose) instead of C++ style (ifstream, getline, close).\n- The code does not handle case sensitivity or punctuation when searching for the word.\n- The code lacks comments explaining the logic and functionality.\n\nSuggestions for improvement:\n- Modify the code to count the occurrences of a specific word, not just the number of lines.\n- Use C++ style file handling (ifstream, getline, close) instead of C-style (fopen, fgets, fclose).\n- Handle case sensitivity and punctuation when searching for the word.\n- Add comments to explain the code's logic and functionality.", "score": [8.0, 4.0]}
+{"review_id": "Jx2V294fY8iEp6SudSXY2m", "question_id": 62, "answer1_id": "jWyN8NTdVix6CUoqfbRqVx", "answer2_id": "RS2roGuiWf5jZfncgfHPeG", "reviewer_id": 2, "metadata": {}, "text": "7 4\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The comments are helpful and explain the logic and functionality of the code.\n- The function correctly implements the dynamic programming approach to find the longest common subsequence (LCS) of two input strings.\n\nWeaknesses:\n- The code could be more efficient by initializing the 2D array 'm' with the first row and column set to 0, instead of initializing all elements to 0.\n\nSuggestions for improvement:\n- Initialize the first row and column of the 2D array 'm' to 0 to improve efficiency.\n\nAssistant 2:\nStrengths:\n- The code is structured and easy to read.\n- The function uses dynamic programming to build a 2D list 'dp'.\n\nWeaknesses:\n- The code does not correctly implement the given problem statement, as it returns the length of the LCS instead of the LCS itself.\n- The comments are not clear and concise, making it difficult to understand the code's logic and functionality.\n- The code initializes the first row and column of 'dp' to 1, which is incorrect for the LCS problem.\n\nSuggestions for improvement:\n- Modify the code to return the LCS itself instead of its length.\n- Improve the comments to better explain the code's logic and functionality.\n- Initialize the first row and column of the 2D array 'dp' to 0 to correctly solve the LCS problem.", "score": [7.0, 4.0]}
+{"review_id": "6tuLe2JSdvQAkKe7XC4KAi", "question_id": 63, "answer1_id": "mx9G7gfKTCXCmNRaiMZQr9", "answer2_id": "Rz8sSwJzffBjUhjEYAyEWS", "reviewer_id": 2, "metadata": {}, "text": "8 6\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The regular expression pattern is more comprehensive, including a wider range of valid email formats.\n- The code is wrapped in a function, making it reusable.\n\nWeaknesses:\n- The code lacks comments explaining the regular expression pattern.\n\nSuggestions for improvement:\n- Add comments to explain the different parts of the regular expression pattern.\n\nAssistant 2:\nStrengths:\n- The code is relatively easy to read.\n- The code takes user input, making it more interactive.\n\nWeaknesses:\n- The regular expression pattern is less comprehensive, potentially missing some valid email formats.\n- The code is not wrapped in a function, making it less reusable.\n- The variable names are not following proper naming conventions (e.g., \"Email Addition\" should be \"email_pattern\").\n- The code lacks comments explaining the regular expression pattern.\n\nSuggestions for improvement:\n- Improve the regular expression pattern to cover a wider range of valid email formats.\n- Wrap the code in a function to make it reusable.\n- Follow proper naming conventions for variables.\n- Add comments to explain the different parts of the regular expression pattern.", "score": [8.0, 6.0]}
+{"review_id": "2FxtfJWPNcg4VhBgXLUmhx", "question_id": 64, "answer1_id": "8LjpXk6Va5L2FBEwTU9YJ6", "answer2_id": "SQCnPwtYgHH57ikbXgUpQq", "reviewer_id": 2, "metadata": {}, "text": "8 4\n\nAssistant 1:\nStrengths:\n- The code is simple and easy to understand.\n- The dynamic programming approach is implemented correctly using a list to store the Fibonacci numbers.\n- The code is efficient and has a linear time complexity.\n- The comments are clear and concise, explaining the code's logic and functionality.\n\nWeaknesses:\n- The code could be further optimized by using a more space-efficient approach, such as storing only the last two Fibonacci numbers instead of the entire list.\n\nSuggestions for improvement:\n- Consider using a more space-efficient approach by storing only the last two Fibonacci numbers.\n\nAssistant 2:\nStrengths:\n- The code attempts to use memoization to store previously computed Fibonacci numbers.\n\nWeaknesses:\n- The code is unnecessarily complex and difficult to understand.\n- The code contains a redundant nested function with the same name as the outer function, which is confusing.\n- The code does not properly implement memoization, as the memo dictionary is not updated with new Fibonacci numbers.\n- The comments are not clear and concise, making it difficult to understand the code's logic and functionality.\n\nSuggestions for improvement:\n- Simplify the code by removing the unnecessary nested function.\n- Properly implement memoization by updating the memo dictionary with new Fibonacci numbers.\n- Improve the comments to better explain the code's logic and functionality.", "score": [8.0, 4.0]}
+{"review_id": "Jx9YeuEoWx2ifEz8P2mm5Z", "question_id": 65, "answer1_id": "BLC8fhAUeahFfrjBRiL9tx", "answer2_id": "mfGSLxAAjjBmUFtCx3STEN", "reviewer_id": 2, "metadata": {}, "text": "9 8.5\n\nBoth Assistant 1 and Assistant 2 provided correct and efficient implementations of the binary search algorithm. Their code is readable and follows proper coding standards. However, there are some differences in their explanations and code structure.\n\nAssistant 1:\n- The explanation is clear and concise, providing a good understanding of how the algorithm works.\n- The code is well-structured and easy to read.\n- The example usage provided is helpful for understanding how to use the function.\n- The only minor issue is the lack of comments within the code. Adding comments would make it easier for others to understand the logic behind the code.\n\nAssistant 2:\n- The explanation is detailed and provides a step-by-step implementation of the algorithm, which is helpful for those who are new to binary search.\n- The code is also well-structured and easy to read.\n- The example usage provided is helpful for understanding how to use the function.\n- The main difference between Assistant 1 and Assistant 2 is the variable names used in the code. Assistant 2 uses \"low\" and \"high\" instead of \"left\" and \"right,\" which might be slightly more intuitive for some readers.\n- Similar to Assistant 1, adding comments within the code would make it easier for others to understand the logic behind the code.\n\nOverall, both assistants did a great job implementing the binary search algorithm. Assistant 1's explanation is more concise, while Assistant 2's explanation is more detailed. Both implementations are correct and efficient, but Assistant 2's variable names might be slightly more intuitive for some readers. Adding comments to the code would improve both submissions.", "score": [9.0, 8.5]}
+{"review_id": "Agpb6QGC392rQVjQKeP9uU", "question_id": 66, "answer1_id": "NAH245JGvMWF24FWZJagYp", "answer2_id": "CcUgvJProVma9sNKW2nKHc", "reviewer_id": 2, "metadata": {}, "text": "9 5\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to understand.\n- The implementation is efficient and correctly solves the problem.\n- Comments are clear and concise, explaining the purpose of each method.\n- Proper coding standards and best practices are followed.\n\nWeaknesses:\n- There is a minor issue with the is_empty() method, where it uses `not bool(self.stack1 or self.stack2)` instead of `not (self.stack1 or self.stack2)`. However, this does not affect the functionality of the code.\n\nSuggestions for improvement:\n- The is_empty() method can be simplified to `return not (self.stack1 or self.stack2)`.\n\nAssistant 2:\nStrengths:\n- The code is readable and easy to understand.\n\nWeaknesses:\n- The implementation is incorrect and does not solve the problem as required.\n- The dequeue method is not implemented correctly, as it does not transfer elements from the in_stack to the out_stack.\n- The peek method is not required in the problem statement, and it is not implemented correctly.\n- The is_empty method checks if the in_queue is empty, but it should check if both stacks are empty.\n- Comments are not provided to explain the code's logic and functionality.\n\nSuggestions for improvement:\n- Implement the dequeue method correctly by transferring elements from the in_stack to the out_stack when the out_stack is empty.\n- Remove the peek method, as it is not required in the problem statement.\n- Modify the is_empty method to check if both stacks are empty.\n- Add comments to explain the code's logic and functionality.", "score": [9.0, 5.0]}
+{"review_id": "MHpYtwoZuBpD2UmCXE7vHs", "question_id": 67, "answer1_id": "NKXTwB3WtfFcmQZbTxBP5o", "answer2_id": "a4NeYaavwpgqC46erF3VFP", "reviewer_id": 2, "metadata": {}, "text": "7 4\n\nAssistant 1:\nStrengths:\n- The code is simple and easy to understand.\n- The solution is efficient in terms of space complexity.\n- The example provided is helpful for understanding the code.\n\nWeaknesses:\n- The code does not meet the requirement of not using any extra data structures, as it uses a list to store common elements.\n- The time complexity can be improved.\n\nSuggestions for improvement:\n- To meet the requirement of not using any extra data structures, you can print the common elements directly instead of storing them in a list.\n- You can optimize the time complexity by sorting the arrays and using a two-pointer approach.\n\nAssistant 2:\nStrengths:\n- The code attempts to solve the problem without using any extra data structures.\n\nWeaknesses:\n- The code is not efficient, as it uses nested loops and multiple arrays.\n- The code is not easy to understand, and the comments do not provide clear explanations.\n- The code is not written in a modular way, making it difficult to reuse or test.\n- The code does not compile due to undeclared variables and incorrect usage of functions.\n\nSuggestions for improvement:\n- Declare the necessary variables and fix the compilation errors.\n- Use a more efficient algorithm, such as sorting the arrays and using a two-pointer approach.\n- Write the code in a modular way, separating the input, processing, and output into different functions.\n- Improve the comments to provide clearer explanations of the code's logic and functionality.", "score": [7.0, 4.0]}
+{"review_id": "MiEb4NdFbejqM4g3CTEQcp", "question_id": 68, "answer1_id": "5P7wPyrNwBrHNdJNrETS5h", "answer2_id": "bpcHYkjjXamp998ARdtfWK", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, I will evaluate the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 1\nf(2) = 39\n\nAssistant 2:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(4) - 2 + 3 (Incorrect step: 2^3 should be 8, not 4)\nf(2) = 20 - 2 + 3\nf(2) = 19\n\nAssistant 1's process is correct, and the final answer is 39. Assistant 2 made an error in calculating 2^3, which led to an incorrect final answer. Therefore, I would give Assistant 1 a score of 10 and Assistant 2 a score of 4.\n\nThe output tuple is (10, 4).", "score": [10, 4]}
+{"review_id": "ZkqsuztdgAo3KSzjTNJvJ3", "question_id": 69, "answer1_id": "UmnL4WoucBduFpX4jptkNU", "answer2_id": "SququGaG2Y8euqGVwV2Ueb", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n\n3x + 10 = 5x - 10 (use the distributive property)\n\n10 + 10 = 5x - 3x (subtract 3x from both sides)\n\n20 = 2x (simplify)\n\nx = 10 (divide both sides by 2) \n\nTherefore, x = 10.\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe problem-solving steps are correct, and the final answer is x = 10.\n\nAssistant 2:\nThe problem-solving steps are incorrect. The mistake is in the step where they subtract 5x from both sides. It should be subtracting 3x from both sides, as shown in Assistant 1's answer. The correct answer is x = 10, not x = -5.\n\nScores:\nAssistant 1: 10\nAssistant 2: 4\n\nAssistant 1 performed better in this case, as they provided the correct answer and problem-solving steps.", "score": [10, 4]}
+{"review_id": "5ve7RdpnSgw9JXXQ44cnMg", "question_id": 70, "answer1_id": "Hymm673GkY5tXNaTFeNUBZ", "answer2_id": "9Zb7Fenxop3RQFUrxWr5Hn", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nGiven the endpoints (2, -2) and (10, 4), we can use the distance formula:\n\ndistance = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nPlugging in the coordinates of the given endpoints, we get:\n\ndistance = sqrt((10 - 2)^2 + (4 - (-2))^2)\ndistance = sqrt(8^2 + 6^2)\ndistance = sqrt(64 + 36)\ndistance = sqrt(100)\ndistance = 10\n\nThe length of the line segment is 10 units.\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe problem-solving process is correct, and the final answer is also correct. The assistant used the distance formula and plugged in the correct values to arrive at the correct answer.\n\nAssistant 2:\nThe problem-solving process is incorrect. The assistant attempted to use the Pythagorean theorem but made mistakes in calculating the lengths of the legs of the right triangle. The final answer is also incorrect.\n\nBased on the evaluation, I would give the following scores:\n\nAssistant 1: 10\nAssistant 2: 3\n\nAssistant 1 performed better in this case, as they used the correct method and arrived at the correct answer.", "score": [10, 3]}
+{"review_id": "eA4Tb6m3gPs5v5fthbtUWy", "question_id": 71, "answer1_id": "PUzddJ35E3WsM7BDQ9E59i", "answer2_id": "HkYsYvcMNHvt5suAZJpFWw", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request. They each provided a well-written formal email template that can be used to propose a joint venture to a potential business partner. Both responses included appropriate details, such as the benefits of the joint venture, the strengths of the companies, and proposed next steps. The quality of both responses is very similar, and they both deserve a high score of 9.", "score": [9.0, 9.0]}
+{"review_id": "f8vBXCoEFe5AD4Umz3Qdgo", "question_id": 72, "answer1_id": "6Q72hZCtDkgq379yhdmN5N", "answer2_id": "F45reLGER4jrAjZZi5r58x", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth assistants provided relevant and helpful resignation letter templates. Assistant 1's response was slightly more detailed and included specific mentions of gratitude for the employer's support, guidance, and collaboration, which made it more personalized. Assistant 2's response was also helpful and relevant, but it was slightly less detailed and personalized compared to Assistant 1's response. Both responses were accurate and appropriate for the user's request.", "score": [9.0, 8.5]}
+{"review_id": "QFv9kc9y65kwzWNj5B8xV4", "question_id": 73, "answer1_id": "ReXnHy9C8SwcYPAep6gvJg", "answer2_id": "ZLx4JuCtwNcv55nPbyXQvB", "reviewer_id": 1, "metadata": {}, "text": "9 9\n\nBoth Assistant 1 and Assistant 2 provided well-structured, detailed, and relevant formal letters of recommendation for a student applying to a prestigious graduate program in computer science. Both letters included the necessary elements, such as the sender's and recipient's information, an introduction, a description of the student's academic achievements and extracurricular activities, and a conclusion. The language used in both letters is formal and appropriate for the context. Both assistants demonstrated a high level of competence in addressing the user's request, and it is difficult to distinguish one as being significantly better than the other. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "ivSfAcuBY7L7YyvSrZJ7Hr", "question_id": 74, "answer1_id": "cKk5zZe8yYY4JH3kr5pGXG", "answer2_id": "XPgHiaRwa5ykfKMZG7GQ6b", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request for a product launch announcement email. Both emails effectively introduced the new software solution and highlighted its benefits, while also offering a special introductory offer. Assistant 1's response was slightly more detailed in terms of the specific benefits of the software, such as increased collaboration, real-time tracking, and comprehensive reporting. However, Assistant 2's response was more concise and had a slightly better flow, making it easier to read and understand. Additionally, Assistant 2's response included a call to action for scheduling a demo, which is a valuable addition to a product launch email. Overall, both responses were of high quality, but Assistant 2's response was slightly better in terms of readability and including a call to action.", "score": [8.0, 9.0]}
+{"review_id": "BuruAw9mhL54toBtSn4gdh", "question_id": 75, "answer1_id": "c5rwA3cPjytSGcn7H8dZ6Q", "answer2_id": "MEWrWkY8Y5CSBTCNkfo2nx", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. Assistant 1's response was slightly more detailed, including information about the cause of the delay, steps taken to resolve the issue, and measures put in place to prevent future delays. This level of detail may provide the customer with greater reassurance that the issue has been addressed. Assistant 2's response was also well-written and addressed the main points of an apology email, but it lacked the specific details provided by Assistant 1. Both responses were polite and professional, and either would be suitable for use in an apology email.", "score": [9.0, 8.5]}
+{"review_id": "DJJNN7rpe3Smk4xPTK3Sud", "question_id": 76, "answer1_id": "XZGPtBo86KfF9REZ36s2X5", "answer2_id": "f8Jmezvy4SwqgSVeWQqTzk", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided relevant and detailed scripts for a YouTube video exploring the history and cultural significance of jazz. Assistant 1's script was well-structured and had a clear narrative flow, with a good balance of historical context and modern-day significance. The script also included visuals and narration that would make for an engaging video. Assistant 2's script was also informative and engaging, with a host guiding the viewer through the history of jazz and its cultural impact. However, Assistant 1's script had a slightly better structure and flow, which is why it received a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "PXJsmAbnG2is6Fbwf6utex", "question_id": 77, "answer1_id": "DRncHCsdGji756efDhacUT", "answer2_id": "bZVCu7vDL4GGm5P8zN3xou", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided engaging and detailed travel blog posts about a recent trip to Hawaii. They both highlighted cultural experiences and must-see attractions, making it difficult to choose one over the other. Assistant 1 focused more on hula performances, luaus, and natural attractions like volcanic craters and Waikiki Beach. Assistant 2, on the other hand, emphasized the Bishop Museum, Napali Coast, and the Road to Hana. Both assistants showcased different aspects of Hawaii, making their responses equally informative and enjoyable to read.", "score": [9.0, 9.0]}
+{"review_id": "YWS5oKszsrSrtthBCidSbU", "question_id": 78, "answer1_id": "Y5rCQHHDA6WNfhRcB6QboG", "answer2_id": "gYkH3Pit5J9gnJyXJtoHUG", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided captivating movie reviews for a recently released science fiction film, discussing the plot, characters, and special effects in detail. Both reviews were engaging, relevant, and accurate, making it difficult to choose one over the other. The level of detail in both responses was sufficient to give the reader a clear understanding of the films' storylines and features. Therefore, both assistants receive a score of 9 for their excellent performance.", "score": [9.0, 9.0]}
+{"review_id": "Zx9sBBoZKjin5fKGaTrTJm", "question_id": 79, "answer1_id": "Lea4wh5n6GsEkBnKsntN64", "answer2_id": "dX4Pt6xQpstrrnZgRLSVnN", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided relevant and detailed podcast script structures. Assistant 1's response was more comprehensive, with a clear division of sections and a more detailed discussion of the impact on artists and the music industry. Assistant 2's response was also helpful and relevant, but it lacked the depth and organization found in Assistant 1's response. Assistant 1's script also included a more engaging introduction and conclusion, making it a more complete podcast script.", "score": [9.0, 8.0]}
+{"review_id": "aC3GAuowHuvwJPbzbiGpg4", "question_id": 80, "answer1_id": "gdLxzcypTeuD6ToC6HWnXh", "answer2_id": "jseJYZmd8fBnzbx2EN9oq9", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided well-written and detailed symphony concert reviews that discussed the orchestra's performance and the overall audience experience. Both reviews were engaging and captured the essence of attending a symphony concert. They both showcased the orchestra's talent and the audience's reaction to the performance. It is difficult to differentiate between the two responses in terms of helpfulness, relevance, accuracy, and level of detail, as both assistants did an excellent job in addressing the user's request. Therefore, both Assistant 1 and Assistant 2 receive a score of 9.", "score": [9.0, 9.0]}
diff --git a/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_llama_vicuna-13b-20230322-new-hp-fp16.jsonl b/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_llama_vicuna-13b-20230322-new-hp-fp16.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..3922747b7fe98285ef01b4644d6213f3687e5048
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-13b_20230322-new-hp-fp16/review_llama_vicuna-13b-20230322-new-hp-fp16.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "8PYRvR723xkJtr4MuZAUTQ", "question_id": 1, "answer1_id": "J3UA6eGXGyFeUGqGpP3g34", "answer2_id": "FNAYdXQwcrNkTD4nCqdFGb", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about improving time management skills. Assistant 1 provided a slightly more detailed response with a longer list of strategies, while Assistant 2's response was more concise and well-organized. Both responses covered essential aspects of time management, such as setting goals, prioritizing tasks, and minimizing distractions. However, Assistant 1's answer included some additional points, such as managing stress, getting enough sleep, and striking a balance, which contributed to a more comprehensive response. Therefore, Assistant 1 receives a score of 9, while Assistant 2 receives a score of 8.5.", "score": [9.0, 8.5]}
+{"review_id": "WLkDKpLa293rB4TL53sfkm", "question_id": 2, "answer1_id": "jZVDNTtFnNNd6vgkGjzAGa", "answer2_id": "7SzYV3TjzyrVkSKKGaXN4c", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective ways to deal with stress. Assistant 1 provided a slightly more detailed response with a list of 10 methods, while Assistant 2 provided 9 methods. Both assistants covered similar strategies, such as exercise, relaxation techniques, social support, sleep, healthy diet, goal-setting, time management, and seeking professional help. Assistant 1 also mentioned developing a support network and talking to someone, while Assistant 2 mentioned avoidance of unhealthy habits. Both answers were well-structured and informative, but Assistant 1's response was slightly more comprehensive, which is why it received a higher score.", "score": [9.0, 8.5]}
+{"review_id": "6GxScivADJ7XfcnAKHVxHC", "question_id": 3, "answer1_id": "NbSxRXf8Yuvjr7HC6AvrPw", "answer2_id": "cA7LoXTBCgegnTcKWpjxQq", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's response contains many repetitive and incorrect points, making it less helpful and accurate. Assistant 2's response is more concise, accurate, and relevant, providing a clearer comparison between Python and JavaScript.", "score": [4.0, 8.0]}
+{"review_id": "TRWRBkTBBJhqowfVzLAYZF", "question_id": 4, "answer1_id": "E9zrPDdncYvAebEaxiPXfx", "answer2_id": "8Yo5UVMPzoxjCsi3fuHNmH", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant tips for increasing productivity while working from home. Assistant 1 provided six clear tips, while Assistant 2 expanded on those tips and provided a more comprehensive list of ten suggestions. Assistant 2's response was slightly more detailed and covered additional aspects such as dressing professionally, communication with the team, exercise, diet, and mental health, which contributed to a higher score. Both responses were accurate and relevant, but Assistant 2's answer was more thorough and well-rounded.", "score": [8.0, 9.0]}
+{"review_id": "eLAf8nqBPGgg2eNcfPgLr3", "question_id": 5, "answer1_id": "36TYpADDHAfRvtvKitfSXR", "answer2_id": "cCc7EgnQNgcVAVqk96Pyto", "reviewer_id": 1, "metadata": {}, "text": "8 9.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate explanations of the basics of quantum computing. Assistant 2, however, provided a more detailed and comprehensive response, covering the concepts of superposition and entanglement more thoroughly, as well as mentioning the potential applications and current state of quantum computing research. Assistant 1's response was also informative, but it could have benefited from more elaboration on the concepts and potential applications.", "score": [8.0, 9.5]}
+{"review_id": "9TmmFbcE4SkrfEEnCwbXeA", "question_id": 6, "answer1_id": "8vtLLYRM352mZ8PXXbwzBT", "answer2_id": "TUL5M2TWXQmM56U4Ckk4s4", "reviewer_id": 1, "metadata": {}, "text": "8 9.5\nBoth Assistant 1 and Assistant 2 provided helpful and relevant information in their responses. Assistant 1 focused on the completeness of the protein sources, which is an important aspect to consider. However, Assistant 2 provided a more comprehensive answer, covering a wider range of differences between plant-based and animal-based protein sources, including nutritional content, sustainability, health benefits, ethical considerations, taste, and allergies. This additional information makes Assistant 2's response more detailed and informative, resulting in a higher score.", "score": [8.0, 9.5]}
+{"review_id": "VZUNwfpveMFvYxXa8C2FsZ", "question_id": 7, "answer1_id": "mrCniyWYvAtxWhgMdXq2BG", "answer2_id": "iZUdysQf69MHQE9Lq76mii", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about developing critical thinking skills. Assistant 1 offered four steps, while Assistant 2 provided a more detailed response with seven steps. Both answers were accurate and contained valuable advice. However, Assistant 2's response was more comprehensive, covering a wider range of strategies and offering more depth in the explanations. This is why Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "YA6PhMfmdzMcrJ9a8hcGP6", "question_id": 8, "answer1_id": "S8fmdbvpvbgTUyDU9V3M3N", "answer2_id": "D8EWgfq9yytMgQYybq3Rhq", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and accurate response, but it only focused on one major challenge, which is the lack of skilled teachers. On the other hand, Assistant 2 provided a more comprehensive and detailed answer, listing ten major challenges faced by the education sector today. This response covered a wider range of issues, including access, quality, funding, technology, accountability, inclusion, safety, employability, globalization, and teacher shortages. Therefore, Assistant 2 receives a higher score for providing a more complete and informative answer.", "score": [7.0, 9.0]}
+{"review_id": "eabUxE7d5tNeNuays4hMkk", "question_id": 9, "answer1_id": "KmuNjvNKRyaFwaBZTLArcG", "answer2_id": "5bzosDZ96t2p5qxeS7VAeA", "reviewer_id": 1, "metadata": {}, "text": "9 6\nAssistant 1 provided a more comprehensive and well-organized answer, covering a wider range of factors that influence consumer behavior. The answer is detailed and precise, making it easier for the user to understand the various factors. Assistant 2's response is less detailed and misses some important factors, such as situational and market factors. Additionally, Assistant 2's response is incomplete, ending abruptly after the fourth point.", "score": [9.0, 6.0]}
+{"review_id": "5Ywujs9FQW2zgvpPjqAVdp", "question_id": 10, "answer1_id": "HNNT9k5htiMeRwNSD6dcz4", "answer2_id": "itBobRwhSZUnsWkUs5bVwX", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a list of conflict resolution strategies, but some of the explanations were not clear or accurate, such as avoidance being an effective strategy in the short term. Assistant 2 provided a more comprehensive list of strategies, with clearer explanations and practical steps to take in resolving conflicts. The response from Assistant 2 was more helpful, relevant, and detailed, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "hh7vLmymDxbkVWM3kVakvh", "question_id": 11, "answer1_id": "ChXjhDDikxU9FV3CADs6Ym", "answer2_id": "iVTKdmUeTySryqEHhtgmkS", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and accurate response, but it lacked some details and organization. The answer focused mainly on the environmental impact and briefly mentioned the health risks associated with single-use plastic bottles. Assistant 2, on the other hand, provided a more comprehensive and well-organized response, covering both environmental and human health implications, as well as mentioning the benefits of using reusable bottles. The level of detail in Assistant 2's response was higher, making it more helpful and informative for the user.", "score": [7.0, 9.0]}
+{"review_id": "HQQgjbi4udovdMiR4AqdXH", "question_id": 12, "answer1_id": "5wsPnN3VmmSkahgugFNo7u", "answer2_id": "7UgtosTTGx7bnRcQGxdDJo", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information regarding the factors to consider when designing an inclusive and accessible public transportation system. Assistant 1 focused more on the needs of people with disabilities, while Assistant 2 provided a broader range of factors, including universal accessibility, diversity of needs, multi-modal options, frequency, reliability, safety, information, communication, user-friendly fare collection, integration with other modes, community engagement, and inclusive pricing. Assistant 2's response was more comprehensive and well-organized, which is why it received a higher score. However, both responses were informative and useful.", "score": [8.0, 9.0]}
+{"review_id": "CRbBScstvWEjW7J4uG3Avg", "question_id": 13, "answer1_id": "NRGZGnU2sPN3ShMe9C3fMn", "answer2_id": "Yt5tqBrrKfcumuuuxf2qqM", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a response that focused on fiscal policy but failed to address monetary policy, which is an essential part of the question. The response also incorrectly categorized interest rate changes as fiscal policy, when they are actually part of monetary policy. Assistant 2, on the other hand, provided a more comprehensive answer that covered both fiscal and monetary policies and their respective tools. Assistant 2 also provided a more accurate categorization of interest rate changes as part of monetary policy. Overall, Assistant 2's response was more helpful, relevant, accurate, and detailed than Assistant 1's response.", "score": [7.0, 9.0]}
+{"review_id": "gDZK264kgVmS6Q28YotyJF", "question_id": 14, "answer1_id": "inKimHkWsXShQBTRmxr5Yg", "answer2_id": "4pZ4Uy544Bc3K59fhbW7xj", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 gave a brief overview of how language and cultural barriers can affect communication and relationships in multicultural societies. However, Assistant 2 provided a more detailed response, discussing the specific impacts of language and cultural barriers, such as misunderstandings, misinterpretations, stereotypes, and isolation. Assistant 2 also offered suggestions on how to overcome these barriers, making their response more comprehensive and actionable. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "48arH6gczA9WBhV3bvQpF6", "question_id": 15, "answer1_id": "H8aKtWwf8m6Lgxc2YyR2yf", "answer2_id": "762peC97upw58WFQeLNoXZ", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief overview of AI applications in healthcare, but the response lacked a specific scenario and details. Assistant 2, on the other hand, provided a more comprehensive answer, describing a scenario where AI could be used to improve diagnosis, treatment planning, predictive analytics, administrative tasks, and personalized patient care. The response from Assistant 2 was more relevant, detailed, and directly addressed the user's question.", "score": [8.0, 10.0]}
+{"review_id": "ESn82DvoqX6oLUWtdPn7es", "question_id": 16, "answer1_id": "PafVwxMsjSkYUETiVBgxTU", "answer2_id": "Yqfg2saKSNPauCS8YdsjdD", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the CRISPR-Cas9 gene editing technology. Assistant 2, however, offered a slightly more detailed and structured explanation of the process, including the step-by-step procedure and a clearer distinction between the potential applications and ethical implications. Assistant 1's response was also informative but lacked the same level of detail and organization as Assistant 2's response.", "score": [8.0, 9.0]}
+{"review_id": "GZRzXUxRqchuYJM6gBiqdP", "question_id": 17, "answer1_id": "dmDUAfTP4aERJqqSeDBybu", "answer2_id": "gKd2BPWp7HG9PTDdgS7HT8", "reviewer_id": 1, "metadata": {}, "text": "8 9.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about vaccinations and herd immunity. Assistant 1 gave a brief explanation of how vaccinations work and what herd immunity is, which is useful for a quick understanding. However, Assistant 2 provided a more detailed and comprehensive response, explaining the concept of herd immunity more thoroughly and mentioning the importance of herd immunity for those who cannot be vaccinated. This additional information makes Assistant 2's response more valuable and informative, resulting in a higher score.", "score": [8.0, 9.5]}
+{"review_id": "3DnMvFfeC328LZCLcQa8jm", "question_id": 18, "answer1_id": "8KGSSqbLqVdSZMEN9oCv5R", "answer2_id": "CyZSkdkGWmjh8ZgxrQ2f66", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1 focused on the specific ways social media platforms influence news consumption and sharing, as well as the potential implications for the spread of misinformation. Assistant 2 provided a more general overview of the impact of social media on news consumption and sharing, while also addressing concerns about misinformation and the measures taken by social media platforms to combat it. Assistant 2's response was slightly more comprehensive and well-rounded, which is why it received a higher score. However, both responses were informative and useful in answering the question.", "score": [8.0, 9.0]}
+{"review_id": "ZGkEnXAN63Qy9vyku7Tuzn", "question_id": 19, "answer1_id": "HbnJXJpPfaM2iX3ek4Epvy", "answer2_id": "LwA42YWwqvdkStLZEUbmm9", "reviewer_id": 1, "metadata": {}, "text": "6 9\nAssistant 1's response started off by discussing the influence of cultural, social, and economic factors on food choices but then went into a long list of interventions without explaining how they relate to the factors mentioned earlier. The response was repetitive and lacked a clear structure. Assistant 2, on the other hand, provided a more concise and well-structured response that directly addressed the question, explaining how these factors influence food choices and offering suggestions on how to promote healthier diets. Assistant 2's response was more helpful, relevant, and accurate, which is why it received a higher score.", "score": [6.0, 9.0]}
+{"review_id": "UBoiDJYhJuF3mKe9eKQfNX", "question_id": 20, "answer1_id": "mx8Abfz5PtDcn6jgCA8zhM", "answer2_id": "ajfFPpHDdMiSp3PVfsUFbE", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a somewhat relevant response, but it focused more on population size and genetic drift rather than explaining the process of natural selection in detail. Assistant 2, on the other hand, gave a more comprehensive and accurate explanation of natural selection, including the steps involved in the process and how it contributes to the evolution and adaptation of species. Assistant 2's response was more helpful, relevant, and detailed, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "o6E5tti7AR7bKaavhxLP46", "question_id": 21, "answer1_id": "NuS9PUGkJG2pHscArvfyeF", "answer2_id": "3n8npKQKxgHEbEmf2K6AwF", "reviewer_id": 1, "metadata": {}, "text": "7 10\nAssistant 1's response is brief and lacks detail, but it does convey the basic idea of introducing oneself as a knight. Assistant 2's response, on the other hand, is much more detailed and provides a more immersive and accurate portrayal of how a medieval knight might introduce themselves at a royal banquet. The response includes elements such as addressing the audience, mentioning their background, and expressing their commitment to the ideals of knighthood. This makes Assistant 2's response more helpful, relevant, and accurate, earning it a higher score.", "score": [7.0, 10.0]}
+{"review_id": "CbWvroz2UjdmgeGQKVfxe6", "question_id": 22, "answer1_id": "SPjzirzbzo3UJ8BHXSgY9y", "answer2_id": "LfeqCy9vR3kCaJiQV4Fyqf", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and helpful response, but it was not as detailed or engaging as Assistant 2's response. Assistant 2 painted a vivid picture of the rewards and challenges, used more pirate-like language, and created a sense of excitement and determination. This made Assistant 2's response more effective in motivating the crew to search for hidden treasure.", "score": [7.0, 9.0]}
+{"review_id": "DmpfU9prKudH4r85xWqyJu", "question_id": 23, "answer1_id": "JkeeC4MZVwybPU8DSbusnT", "answer2_id": "JrnFfmnsuykbTkFbUnei6k", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a relevant and accurate response by explaining what a soliloquy is and giving an example from Romeo and Juliet. However, the answer could have been more helpful and detailed by providing a unique soliloquy as requested. Assistant 2, on the other hand, crafted a well-written and original soliloquy that demonstrates how a Shakespearean character might declare their love. The response was relevant, accurate, and detailed, which is why Assistant 2 receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "Ci7A2CFGDfUQKDsyrLh5Sp", "question_id": 24, "answer1_id": "UcfKSXuzBPkZzLHsH4EwQz", "answer2_id": "BDBSC5P2DE3E5LuB9FX7tn", "reviewer_id": 1, "metadata": {}, "text": "4 9\nAssistant 1's response was not relevant to the question as it focused on the chatbot's own origin story rather than providing a superhero origin story explanation for a curious child. Assistant 2, on the other hand, provided a relevant and engaging origin story that was appropriate for a child. The response included details about gaining powers, discovering their purpose, and using their abilities for good, which aligns with the question's intent. Assistant 2's response was helpful, accurate, and detailed, making it a better overall answer.", "score": [4.0, 9.0]}
+{"review_id": "ZELDUdRGHBFdrKTvE4Z7EJ", "question_id": 25, "answer1_id": "cEGdo69FwL2Y3teYkMk9Gp", "answer2_id": "TS97KLnyki7XWYFdSdbZiY", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about the technological advancements in the year 3000. Assistant 1 provided a clear list of five advancements, while Assistant 2 provided a more detailed list of nine advancements. Assistant 2's response was more comprehensive and covered a wider range of topics, such as space travel, food and water production, medicine, energy, and climate change, which were not mentioned by Assistant 1. This additional information makes Assistant 2's response more valuable and informative, resulting in a higher score.", "score": [8.0, 9.0]}
+{"review_id": "HvJKSmecEEh3pG9KL5cxbW", "question_id": 26, "answer1_id": "W28UtVDZTcWwa8TmeruRn4", "answer2_id": "9ATk4tAxBoe267rQUDrgYD", "reviewer_id": 1, "metadata": {}, "text": "6 9\nAssistant 1's response was confusing and lacked the excitement expected from a sports commentator, which is why it received a score of 6. Assistant 2, on the other hand, provided a more engaging and exciting description of the winning play, capturing the essence of a sports commentator, and thus received a score of 9. Assistant 2's response was more relevant and accurate to the user's question, while Assistant 1's response was less detailed and harder to follow.", "score": [6.0, 9.0]}
+{"review_id": "c5Zr3A4bXnbZzZgUhHbwsf", "question_id": 27, "answer1_id": "j65UyAfRdAcWBAEinXJzBK", "answer2_id": "C4zL3pUsGj2Z8U7Tm2nzDN", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a relevant and accurate response, but it lacked the level of detail and immersive description that was present in Assistant 2's response. Assistant 2 painted a vivid picture of the dish, its ingredients, and the overall culinary experience, making it more engaging and appealing to the panel of judges. Assistant 2's response was more in line with what one would expect from a world-famous chef describing their signature dish.", "score": [8.0, 10.0]}
+{"review_id": "mKwEGSa5xMZ5LyBMN4PbME", "question_id": 28, "answer1_id": "VWuywPvFPK42ZxmHpmQ27y", "answer2_id": "CTi8ZRuHoAzRQjMRAU3mBN", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's response was relevant and provided a description of emotions and the view from the top of Mount Everest, but the details were somewhat generic and repetitive. Assistant 2, on the other hand, provided a more vivid and engaging description of the emotions and the view, including specific details about the surrounding peaks and the sense of accomplishment. Assistant 2's response was more helpful, accurate, and detailed, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "MCzRWHc6XP7dzRCMy9zXGN", "question_id": 29, "answer1_id": "AUWSLTAvGV5sP9qLsVyzjT", "answer2_id": "8chhW8gPzM6zHUN6VAqpym", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a decent response, but it was more focused on describing the conditions on Mars rather than the daily life of a space colonist. The response also contained some repetition and lacked a clear structure. Assistant 2, on the other hand, provided a more comprehensive and well-structured answer, addressing the daily life, challenges, and responsibilities of a space colonist on Mars. The response was more relevant to the question and provided a better understanding of what life on Mars would be like.", "score": [7.0, 9.0]}
+{"review_id": "VTULyx64zjt8TRH5MQtDCa", "question_id": 30, "answer1_id": "GZtEdKLxmUUbenH4XRyLkE", "answer2_id": "Pxj7ZVsvQ9HMQnRVMh7os4", "reviewer_id": 1, "metadata": {}, "text": "4 9\nAssistant 1's response was not helpful or relevant to the question, as it did not provide a description of a character in a post-apocalyptic world, their survival, or the allies they encounter. Instead, it provided a list of ways to describe allies, which was not the focus of the question. Assistant 2, on the other hand, provided a detailed and engaging narrative of a character named John in a post-apocalyptic world, describing how he survives and the allies he encounters. The response was relevant, accurate, and detailed, making it a much better answer to the question.", "score": [4.0, 9.0]}
+{"review_id": "Loq2ZWdNqMdukRmTRQ8Yiz", "question_id": 31, "answer1_id": "kba2Xpb75rUfnskZSzrBas", "answer2_id": "NCUhSMqzESRJJVDKHQ7XPB", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's response was somewhat helpful but lacked specific indicators to determine if a restaurant is popular among locals or tourists. It also didn't explain why this information might be useful. Assistant 2, on the other hand, provided a detailed list of key indicators to look for and explained the reasons why knowing whether a restaurant is popular among locals or tourists can be useful. Assistant 2's response was more comprehensive and informative, making it the better answer.", "score": [7.0, 9.0]}
+{"review_id": "MVQrcQEqHCBEhPFCnaheJY", "question_id": 32, "answer1_id": "RCaptsMBYXseVJgjJyyvUh", "answer2_id": "mEvCEzVGnENJAMzs2ioTmp", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 listed six subtle clues, while Assistant 2 provided a more detailed list of ten clues, including nonverbal cues and specific behaviors. Assistant 2's answer was more comprehensive and provided a broader range of clues, which is why it received a higher score. Additionally, Assistant 2 also mentioned that not everyone exhibiting these behaviors is necessarily pretending to understand, which adds nuance to the answer.", "score": [8.0, 9.0]}
+{"review_id": "hguZXn9PotcHzgvL857Kqj", "question_id": 33, "answer1_id": "2CeidEr7Cjj6YgcuaiPgnN", "answer2_id": "CoFnjLgW5Eens9JhnpiJHZ", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a relevant answer, but it was not as detailed and comprehensive as Assistant 2's response. Assistant 2 listed several reasons why someone might choose to use a paper map or ask for directions, covering various aspects such as reliability, battery life, availability, perspective, personal preference, and privacy. This answer was more helpful and informative, providing a better understanding of the topic.", "score": [8.0, 10.0]}
+{"review_id": "9NqZfDaGVJEZeSG6h7jKZu", "question_id": 34, "answer1_id": "kpStLLNWNkGHyRFdKsmDds", "answer2_id": "aQ9mM2wfmHSge2KvuQ8hmx", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief response that touched on some relevant points, such as observing the person's actions and tone of voice. However, the answer lacked detail and specific examples. Assistant 2, on the other hand, provided a more comprehensive and detailed response, listing several ways to determine if a person is genuinely interested in a conversation or simply being polite. Assistant 2's answer included nonverbal cues, responses, participation, tone of voice, and even a suggestion to ask the person directly. This made Assistant 2's response more helpful, relevant, and accurate, earning it a higher score.", "score": [7.0, 9.0]}
+{"review_id": "eNM9ThzuQmywVXqJZVXNwa", "question_id": 35, "answer1_id": "PFtMQp4X4MrMYkEa8c8CKG", "answer2_id": "eM5S86H3bXTGLQcVW59XsD", "reviewer_id": 1, "metadata": {}, "text": "8 9.5\nBoth assistants provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on supporting the local community, flexibility, and eco-friendliness. Assistant 2 provided a more detailed response, listing several reasons such as supporting the local community, unique products, personalized service, sustainability, customer loyalty, and preservation of heritage. Assistant 2's answer is more comprehensive and covers a wider range of reasons, which is why it receives a higher score. However, both assistants did a good job in addressing the question.", "score": [8.0, 9.5]}
+{"review_id": "9PKdYUYnzy7xjZRRtps3xY", "question_id": 36, "answer1_id": "4JwjhR5QVsdYgQupSZrfUp", "answer2_id": "MpBrYa9J2zQy9NGi2dvKp8", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided relevant and helpful information on assessing the credibility of a source. Assistant 1 focused on the author's credentials and the source's sources, which are important factors to consider. However, Assistant 2 provided a more comprehensive and detailed list of tips, including checking the date, evaluating the language, assessing bias, checking the URL, and looking for peer review. This additional information makes Assistant 2's response more valuable and complete, earning it a higher score.", "score": [8.0, 10.0]}
+{"review_id": "HhfQhnsVa9KinuHJ5N98BY", "question_id": 37, "answer1_id": "ednPMy4dvW9CgKg2PJsBqW", "answer2_id": "i8QZzVJo2WkTLc7WMv4bNm", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and general answer, mentioning that some people find being scared exciting and exhilarating, while others find it unpleasant and frightening. However, the response lacks depth and detail. Assistant 2, on the other hand, provided a more comprehensive and detailed answer, discussing the adrenaline rush, coping mechanisms, fight or flight response, stress hormones, phobias, and the possible factors that contribute to individual differences in enjoying scary experiences. This makes Assistant 2's response more helpful, relevant, and accurate.", "score": [7.0, 9.0]}
+{"review_id": "JZBJBYRY99YvfLuxhkWG5u", "question_id": 38, "answer1_id": "ehPApSqCniyGN7hd332ToW", "answer2_id": "HP2CYZ3HJWMcGp6QF9qbq6", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a relevant and accurate response, but it was quite brief and only offered a single example. Assistant 2, on the other hand, provided a more detailed and comprehensive answer, covering various aspects of social behavior that can provide clues about cultural norms and expectations. The response from Assistant 2 was more helpful and informative, which is why it receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "mBhaJNyDcANdYdFSg9caQY", "question_id": 39, "answer1_id": "bLvdcqSwgSCrUPZ8eQLLqb", "answer2_id": "XrMYXEWHff6jXjjJWquXfW", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant response, but it was less detailed and did not address the moral obligation aspect of the question as well as Assistant 2. Assistant 2 provided a more comprehensive answer, discussing the moral obligation aspect, the benefits of space exploration, and the importance of solving Earth's problems. Additionally, Assistant 2 emphasized the possibility of pursuing both goals simultaneously and the need for balancing priorities and resources, making their response more helpful and informative.", "score": [7.0, 9.0]}
+{"review_id": "fuJtKhmCWzqD422rrFdgMF", "question_id": 40, "answer1_id": "8tx5GEHjSRKZRaZwHvLoM8", "answer2_id": "eeS6qNLeX6AScyGrS5pwrc", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 outlined five points to consider when discussing the balance between job creation and technological progress, which provided a good overview of the topic. However, Assistant 2 went a step further by discussing the importance of job creation, technological progress, and the need for balance, as well as providing examples of policies and training programs that can help achieve this balance. Assistant 2's response was more detailed and provided a clearer understanding of the issue, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "jMJYVvx78YUjKxTbRhnJtx", "question_id": 41, "answer1_id": "J6TzFQAyiHs6Arijqwxtrm", "answer2_id": "an76qPuSJ4TQDWvNXdcJ2w", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a detailed calculation based on the average life expectancy in the United States and the range of blinks per day. However, the answer is not accurate, as the number of blinks per day is overestimated (20,000 to 30,000). Assistant 2 provided a more accurate range of blinks per minute (12 to 20) and explained that the exact number of blinks per lifetime is difficult to determine due to various factors. Assistant 2 also provided information on the purpose of blinking and factors that influence blinking frequency. While neither assistant provided a precise answer, Assistant 2's response was more accurate and informative, which is why it received a higher score.", "score": [7.0, 8.0]}
+{"review_id": "kCpThdnYdNMbQEvdfsfhG3", "question_id": 42, "answer1_id": "f7xAtd3wwc4vP7WSnHffAN", "answer2_id": "DYnmt5xTTTkLuyyBxhmDsq", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1 provided a range of atoms in a grain of salt but did not explain the reasoning behind the numbers. Assistant 2, on the other hand, provided a step-by-step explanation of how to calculate the number of atoms in a grain of salt, making it easier for the reader to understand the reasoning. Assistant 2's response was more helpful, relevant, accurate, and detailed, which is why it received a higher score.", "score": [6.0, 8.0]}
+{"review_id": "gebkjENqfmiUcGGhwNuxaW", "question_id": 43, "answer1_id": "eRMyp4AbfgBqtbVZzHupFN", "answer2_id": "MbcVvnW3Eh9wZi34SzYawn", "reviewer_id": 1, "metadata": {}, "text": "7 6\nAssistant 1 provided a more concise answer with a specific number of lightning strikes per day (50 million) and cited the National Weather Service as a source. However, the answer could have been more detailed in explaining the reasoning behind the number. Assistant 2, on the other hand, provided a more detailed explanation of the factors that influence lightning activity and the difficulty in estimating the exact number of strikes per day. However, Assistant 2 failed to provide a specific number, making the answer less precise. Both assistants could have improved their answers by combining the specific number from Assistant 1 with the detailed explanation from Assistant 2.", "score": [7.0, 6.0]}
+{"review_id": "EfxBruDvj9PUHWSnsb7PJk", "question_id": 44, "answer1_id": "kjKb6YG86FfAh22qCePV4V", "answer2_id": "4oSKRo3DtcNtE8gRbrSJPg", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's response is incorrect in its calculations and does not provide a step-by-step explanation as requested. The weight of 12L of helium is not 2 pounds, and the combined weight of the balloons is not 80,000 pounds. Assistant 2's response is more detailed, provides a step-by-step explanation, and gives a rough estimate of the number of balloons needed to lift a house. Although the estimate is not precise, it does a better job of explaining the reasoning behind the calculations and acknowledges the limitations of the estimate.", "score": [6.0, 8.0]}
+{"review_id": "DWnFAPxa5b2E8o6TcfM2AK", "question_id": 45, "answer1_id": "ULtkGu6m6tB4tawFStVdJM", "answer2_id": "bhh9SFsYgquUPeC2qBKMJw", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided an answer with some statistics from 2016, but the explanation was not very clear and lacked a step-by-step reasoning. Assistant 2, on the other hand, provided a more detailed and step-by-step explanation, considering various factors that could affect the number of text messages sent per minute. Although Assistant 2's knowledge cutoff date was in September 2019, the answer was more comprehensive and well-structured, making it easier to understand the reasoning behind the estimate.", "score": [7.0, 9.0]}
+{"review_id": "bRsHoRhPZqR96BpVv9omMz", "question_id": 46, "answer1_id": "C6hkCnJZmfCGvnr5an2jFh", "answer2_id": "oKMFEsBBp4SEJiVPRQZN2N", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's response was confusing and contained inaccuracies, such as the assumption that the total number of words spoken daily on Earth is based solely on the English language. The explanation was also repetitive and unclear. Therefore, I give Assistant 1 a score of 6.\n\nAssistant 2 provided a more relevant and accurate response, considering factors such as the number of languages spoken, speech per person, and world population. Although the answer still did not provide an exact number, it acknowledged the difficulty in estimating the number and provided a reasonable range. Assistant 2 also mentioned written communication, which adds context to the question. Therefore, I give Assistant 2 a score of 8.", "score": [6.0, 8.0]}
+{"review_id": "QAM3srVfqivzf22nEMhhEw", "question_id": 47, "answer1_id": "DApBTLuiJwSsfQHTioH9PY", "answer2_id": "XXXVQo5w5XCFiEuK6n3hUV", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's response is incorrect and misleading, as it provides a specific number of snowflakes (200) without any context or explanation, and the information about snowflake size is irrelevant to the question. Assistant 2's response, on the other hand, is more helpful and relevant, as it explains that it is difficult to give a specific number of snowflakes and provides an estimate based on averages and assumptions. The response also discusses factors that can affect the number of snowflakes and acknowledges the limitations of the estimate. While Assistant 2's response could be more precise, it is still significantly better than Assistant 1's response.", "score": [4.0, 8.0]}
+{"review_id": "dtHVcbNFawwoEr7ychjnb5", "question_id": 48, "answer1_id": "gmy58biNVuneSpFZTcpXtT", "answer2_id": "Ysbx8mFxYva6TbHQGKL8K7", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1 provided a rough estimate of the number of published books but did not attempt to estimate the number of pages in all the books ever written. The response also did not provide a step-by-step explanation as requested. Assistant 2, on the other hand, provided a more detailed response, attempting to estimate the number of pages in all the books ever written based on the average page count and the number of years since the invention of writing. Although the estimate provided by Assistant 2 is still rough and has some limitations, it is more in line with the user's request and provides a clearer explanation of the reasoning behind the estimate.", "score": [6.0, 8.0]}
+{"review_id": "TbiniLL2v5LJvyGpcp8mii", "question_id": 49, "answer1_id": "Cpi6LBB86W73APEA8naNuj", "answer2_id": "KenbYuKVVUVXe2stVtxkeF", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's response contains some unnecessary calculations and incorrect information, such as the Earth's speed around the Sun, which is not relevant to the question. The final answer of 4.7 billion orbits is also incorrect. On the other hand, Assistant 2 provides a more relevant and accurate response, estimating the number of Earth-Sun orbits since the beginning of life to be around 10 billion. Assistant 2's response is more detailed, taking into account the age of life on Earth, astronomical events, and geological markers, and provides a clearer step-by-step explanation.", "score": [6.0, 8.0]}
+{"review_id": "hEkegECphnPTzLUhAkKmSq", "question_id": 50, "answer1_id": "3g83CoAFWrDD6V6LyEpoMV", "answer2_id": "ceWTK9jYWZq2Dd2H7KGkWY", "reviewer_id": 1, "metadata": {}, "text": "3 8\nAssistant 1's response is not helpful, relevant, or accurate. The numbers provided seem arbitrary and do not have any basis in fact or research. The response does not take into account the vast history of music, different cultures, or the development of recording technology. Assistant 2's response, on the other hand, is much more helpful and relevant. It acknowledges the difficulty in estimating the exact number of recorded songs and provides a more reasonable range of trillions of songs created and potentially billions recorded. Assistant 2 also discusses the impact of music on human culture and society, which adds depth to the answer.", "score": [3.0, 8.0]}
+{"review_id": "5pVFMBrgvuswtU33tKLkz4", "question_id": 51, "answer1_id": "8p5A4gouBgCmbQj5gMZapU", "answer2_id": "6Bbzs6YWyzPj52rZAfRPTt", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief response that touched on the potential spread of the Internet during the Renaissance, but it lacked detail and depth. Assistant 2, on the other hand, provided a more comprehensive and speculative analysis of how the Internet might have been invented and its potential impact on communication, information dissemination, and the cultural currents of the time. The response from Assistant 2 was more helpful, relevant, and detailed, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "ju6B86DFUhv6ew8BeyuhPV", "question_id": 52, "answer1_id": "LVWqSxH3Uq6675w7LXH6zP", "answer2_id": "EG6h5jgpQ8wesKAX43nt9X", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and speculative answer about the possible expansion of the Aztec empire if they had repelled the Spanish conquistadors. However, the answer lacked depth and detail. Assistant 2, on the other hand, provided a more comprehensive response, including historical context and a list of possible scenarios that could have occurred if the Aztecs had successfully repelled the Spanish. The answer was more detailed and informative, making it more helpful and relevant to the user's question.", "score": [7.0, 9.0]}
+{"review_id": "htsEkaBywhd3j2ZL5tw6Jh", "question_id": 53, "answer1_id": "ffXksqRU3k8CNHqYfshYhj", "answer2_id": "J76ZgE27N2Pe77NcEBrWUV", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's response provided some information about the Black Death and its impact on the modern world, but it did not directly address the question about what would have happened if the Black Death had not occurred in the 14th century. Assistant 2, on the other hand, provided a more relevant and detailed response, discussing the potential consequences in various aspects of society, such as demographic, economic, political, social, and medical. Assistant 2's response was more helpful and accurate in answering the user's question, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "gXdmfgK8QixFdSPAEM7pvs", "question_id": 54, "answer1_id": "4L3dWuqvWwg5kELt7p8SVZ", "answer2_id": "hoFFABFFF7J8juv3AnoVWZ", "reviewer_id": 1, "metadata": {}, "text": "6 9\nAssistant 1 provided a brief and somewhat relevant response, but it lacked depth and detail. The statement about not discovering the law of gravity is true, but it doesn't explore the potential contributions Newton could have made in biology. On the other hand, Assistant 2 provided a much more detailed and relevant response, discussing various potential contributions Newton could have made in the field of biology, such as plant biology, animal behavior, evolution, and medicine. This response was more helpful and informative, which is why Assistant 2 receives a higher score.", "score": [6.0, 9.0]}
+{"review_id": "KbPWUTooBZLseSf6EfYcEt", "question_id": 55, "answer1_id": "XfpqViGW5dvauNZPDSF8yv", "answer2_id": "8R5n2uZ4oq52QkWQsSS6No", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and accurate response, but it lacked detail and depth. Assistant 2, on the other hand, offered a more comprehensive answer, exploring various possible scenarios and the potential impact of the Beatles not forming as a band. The response was relevant, detailed, and well-organized, making it more helpful and informative for the user.", "score": [7.0, 9.0]}
+{"review_id": "EaweLnd3f8r552uefD8LZo", "question_id": 56, "answer1_id": "WsuMf9XBGs3dFjfSGKjEzg", "answer2_id": "ECtu3QVXVrNhssSyktKzkq", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and relevant answer, but it lacked details and depth. Assistant 2, on the other hand, gave a more comprehensive response, discussing the potential impact on the outcome of the war, the importance of codebreaking, and other factors that contributed to the Allies' efforts during World War II. This made Assistant 2's response more helpful and informative, earning it a higher score.", "score": [7.0, 9.0]}
+{"review_id": "XVxqBxvin5rJyPFgqKmPmz", "question_id": 57, "answer1_id": "5qo3HudLrwrzEV2Px7gYRf", "answer2_id": "jb8ot3ucdF3RvzApEYKft6", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and general answer, mentioning the longer travel time and more dangerous waters if the Suez Canal had not been constructed. However, Assistant 2 provided a more detailed and comprehensive response, discussing the impact on international trade, global economy, and the political landscape of the Middle East. Assistant 2 also mentioned the role of the Suez Canal in military operations, which adds to the quality of the answer. Therefore, Assistant 2 receives a higher score for providing a more informative and well-rounded response.", "score": [7.0, 9.0]}
+{"review_id": "Vv2bcN2z4wZZKni6qXZLut", "question_id": 58, "answer1_id": "SRxwJkNCfUaVe4Gb8LPvSK", "answer2_id": "cAjR8aDWRKVKbxKeqRRgvW", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and general response, stating that the Maya civilization would have continued to grow, thrive, and develop. While this answer is relevant and accurate, it lacks detail and depth. Assistant 2, on the other hand, provided a more comprehensive response, outlining five possible scenarios that could have occurred if the Maya civilization had not collapsed. This answer was more helpful, relevant, and detailed, offering a variety of potential outcomes and considering various aspects of the civilization's impact on the world. Therefore, Assistant 2 receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "npkk5WCEayEA9d6FMYSSJR", "question_id": 59, "answer1_id": "S54Mmyim3QT5owdhwnNYmB", "answer2_id": "7hKzFRQWWPVbr8H4wcbGdA", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief background on Christopher Columbus and his discovery of the Americas but did not fully address the question about what would have happened if he had not discovered the Americas. Assistant 2, on the other hand, provided a more detailed and relevant response, discussing the potential consequences of Columbus not discovering the Americas, such as the continued flourishing of indigenous cultures and the avoidance of negative impacts like disease and colonization. Assistant 2's response was more helpful, relevant, and accurate in addressing the user's question.", "score": [7.0, 9.0]}
+{"review_id": "Vzks2fFeAfy3PudQbmckQ3", "question_id": 60, "answer1_id": "8DJpwEFb5jqt3vMj5oNqVX", "answer2_id": "3vMtYzSrV5ARpYA9fGUe4H", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided some background information on why Van Gogh was not successful during his lifetime but did not directly address the question about what would have happened if he had been successful. Assistant 2, on the other hand, provided a more detailed and relevant response by outlining several possible outcomes if Van Gogh had been successful during his lifetime. Assistant 2's answer was more helpful, accurate, and relevant to the question, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "48gzFcuWEmNtSdyAyL35dk", "question_id": 61, "answer1_id": "KMtqqYkDmyyThkvxXJ4kJd", "answer2_id": "nvbTnFLEyPSSgPdf2n6Mqp", "reviewer_id": 2, "metadata": {}, "text": "4 6\n\nAssistant 1:\nStrengths:\n- The steps provided are clear and easy to understand.\n- The general structure of the solution is correct.\n\nWeaknesses:\n- The code is not provided, only the steps.\n- The strstr function is mentioned, but it is not used in the code.\n- The solution does not count the occurrences of the specific word.\n\nSuggestions for improvement:\n- Provide the actual code implementation.\n- Use the strstr function or another method to count the occurrences of the specific word in the line.\n- Print the number of occurrences of the word at the end of the program.\n\nAssistant 2:\nStrengths:\n- The code is provided and well-structured.\n- The program reads the file line by line.\n\nWeaknesses:\n- The code does not count the occurrences of the specific word.\n- The fgets function is used instead of getline, which is less convenient for C++.\n- The code uses C-style file handling (fopen, fclose) instead of C++ style (ifstream).\n\nSuggestions for improvement:\n- Implement a function to count the occurrences of the specific word in the line.\n- Use getline and ifstream for file handling in C++.\n- Add comments to explain the code's logic and functionality.\n- Print the number of occurrences of the word at the end of the program.", "score": [4.0, 6.0]}
+{"review_id": "bqHCdm6AvV5ZVwTarjNUyE", "question_id": 62, "answer1_id": "MkbgwTpP5b79NWdPEnvHyq", "answer2_id": "RS2roGuiWf5jZfncgfHPeG", "reviewer_id": 2, "metadata": {}, "text": "4 6\n\nAssistant 1 provided a brief explanation of the longest common subsequence problem but did not provide any code implementation. The explanation was clear, but it did not fulfill the requirement of implementing a Python function.\n\nAssistant 2 provided a Python implementation of the dynamic programming approach to find the longest common subsequence. However, the code has some issues:\n\n1. The initialization of the `dp` matrix is incorrect. The first row and column should be initialized to `0`, not `1`.\n2. The condition inside the nested loop is incorrect. It should check for equality between `str1[i-1]` and `str2[j-1]`, not inequality.\n3. The function returns the length of the longest common subsequence, but the problem statement asks for the subsequence itself.\n\nHere's a corrected version of the code:\n\n```python\ndef lcs(str1, str2):\n m, n = len(str1), len(str2)\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if str1[i - 1] == str2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n\n lcs_str = \"\"\n i, j = m, n\n while i > 0 and j > 0:\n if str1[i - 1] == str2[j - 1]:\n lcs_str = str1[i - 1] + lcs_str\n i -= 1\n j -= 1\n elif dp[i - 1][j] > dp[i][j - 1]:\n i -= 1\n else:\n j -= 1\n\n return lcs_str\n\n# Example usage\nstr1 = \"banana\"\nstr2 = \"apple\"\nprint(lcs(str1, str2)) # \"ana\"\n```\n\nThis version initializes the `dp` matrix correctly, fixes the condition inside the nested loop, and adds a new loop to reconstruct the longest common subsequence from the `dp` matrix.", "score": [4.0, 6.0]}
+{"review_id": "gqzXFTcFCDVPLKW44229mG", "question_id": 63, "answer1_id": "PSb6r567shkRasPkJagvWi", "answer2_id": "Rz8sSwJzffBjUhjEYAyEWS", "reviewer_id": 2, "metadata": {}, "text": "6 8\n\nAssistant 1:\nStrengths:\n- The provided regular expression is mostly accurate and efficient.\n- The code is simple and easy to understand.\n\nWeaknesses:\n- The code is incomplete and contains syntax errors.\n- There are no comments explaining the code's logic and functionality.\n- The regular expression does not account for uppercase letters in the email address.\n\nSuggestions for improvement:\n- Complete the code and fix syntax errors.\n- Add comments to explain the code's logic and functionality.\n- Modify the regular expression to include uppercase letters in the email address.\n\nAssistant 2:\nStrengths:\n- The provided regular expression is accurate and efficient.\n- The code is complete and functional.\n- The code is well-structured and easy to understand.\n- There are comments explaining the code's logic and functionality.\n\nWeaknesses:\n- The variable name \"Email Addition\" contains a space and is not a valid Python variable name. It should be changed to \"email_pattern\" or a similar valid name.\n- The variable name \"emaill\" is a typo and should be changed to \"email\".\n\nSuggestions for improvement:\n- Correct the variable names to follow proper Python naming conventions.\n- Add more comments to explain the regular expression in detail.", "score": [6.0, 8.0]}
+{"review_id": "drsYxTMZB7FhvbTB9ifJNV", "question_id": 64, "answer1_id": "QWpBq9gM4iYmsTLQ3NNnBe", "answer2_id": "SQCnPwtYgHH57ikbXgUpQq", "reviewer_id": 2, "metadata": {}, "text": "4 7\n\nAssistant 1:\nStrengths:\n- The problem statement is well-explained.\n\nWeaknesses:\n- There is no code provided to solve the problem.\n- The input format and test cases are not addressed.\n\nSuggestions for improvement:\n- Provide a working code solution that addresses the problem statement.\n- Include the input format and test cases in the code.\n\nAssistant 2:\nStrengths:\n- The code is provided in Python.\n- The code is readable and well-structured.\n- The memoization technique is used for dynamic programming.\n\nWeaknesses:\n- The code does not address the input format and test cases.\n- The code has unnecessary nested functions with the same name, which can be confusing.\n\nSuggestions for improvement:\n- Modify the code to handle the input format and test cases as described in the problem statement.\n- Remove the unnecessary nested functions and simplify the code.", "score": [4.0, 7.0]}
+{"review_id": "jq8GanKHiUTZ55g238MkAS", "question_id": 65, "answer1_id": "U3QxoC4DxXoggnLRbgJh8b", "answer2_id": "mfGSLxAAjjBmUFtCx3STEN", "reviewer_id": 2, "metadata": {}, "text": "4 9\n\nAssistant 1 provided no code implementation and only acknowledged the request. They did not provide any useful information or solution to the problem.\n\nAssistant 2, on the other hand, provided a clear and concise explanation of the binary search algorithm, followed by a well-structured and efficient Python implementation. The code is readable and includes helpful comments. The example usage demonstrates the correct functionality of the implemented binary search algorithm. The only minor improvement that could be made is to add a comment at the beginning of the function to briefly describe its purpose and input parameters.", "score": [4.0, 9.0]}
+{"review_id": "CRsAvd7rF5BFhTLj9ojd7U", "question_id": 66, "answer1_id": "f3TXCreQPJ5Erd6BGt4SFE", "answer2_id": "CcUgvJProVma9sNKW2nKHc", "reviewer_id": 2, "metadata": {}, "text": "6 8\n\nAssistant 1:\nStrengths:\n- The explanation of the concept of implementing a queue using two stacks is clear.\n\nWeaknesses:\n- The provided solution is not in the form of code.\n- The explanation is not precise enough to be directly translated into code.\n\nSuggestions for improvement:\n- Provide a complete Python code implementation of the solution.\n- Include comments in the code to explain the logic and functionality.\n\nAssistant 2:\nStrengths:\n- The provided solution is in the form of Python code.\n- The code is structured and readable.\n\nWeaknesses:\n- The code contains unnecessary variables (in_queue and out_queue) that are not used in the implementation.\n- The dequeue method is incorrect and does not transfer elements between stacks as required.\n- The peek method returns the wrong element.\n\nSuggestions for improvement:\n- Remove the unnecessary variables (in_queue and out_queue).\n- Correct the dequeue method to transfer elements between stacks when the out_stack is empty.\n- Update the peek method to return the correct element from the out_stack.\n- Include comments in the code to explain the logic and functionality.", "score": [6.0, 8.0]}
+{"review_id": "gtFAJB3DQTF6eVDc2tjzXw", "question_id": 67, "answer1_id": "SG7YcT8s4sJNBRqidKk7Z7", "answer2_id": "a4NeYaavwpgqC46erF3VFP", "reviewer_id": 2, "metadata": {}, "text": "6 7\n\nAssistant 1:\nStrengths:\n- The approach is explained step by step, which makes it easy to understand the logic.\n\nWeaknesses:\n- The code is not provided, only the approach.\n- The approach suggests creating an extra array to store the common elements, which contradicts the problem statement.\n\nSuggestions for improvement:\n- Provide actual code implementation.\n- Modify the approach to avoid using extra data structures.\n\nAssistant 2:\nStrengths:\n- The code is provided, which makes it easier to evaluate the solution.\n- The code is structured and includes comments to explain the logic.\n\nWeaknesses:\n- The code uses extra data structures (common and second arrays), which contradicts the problem statement.\n- The code is not efficient, as it uses nested loops, resulting in a higher time complexity.\n\nSuggestions for improvement:\n- Modify the code to avoid using extra data structures.\n- Optimize the code to reduce the time complexity, e.g., by sorting the arrays and using a two-pointer approach.", "score": [6.0, 7.0]}
+{"review_id": "54qtyYvQv4FYqaLamqJsnR", "question_id": 68, "answer1_id": "ENsG3zoeUY7ArFo2kNaQbz", "answer2_id": "bpcHYkjjXamp998ARdtfWK", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nf(2) = 26\nThis answer is incorrect.\n\nAssistant 2:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(4) - 2 + 3\nf(2) = 20 - 2 + 3\nf(2) = 19\nThis answer is also incorrect, and the mistake is in the calculation of (2)^3, which should be 8, not 4.\n\nScores:\nAssistant 1: 3 (The answer is incorrect, and there is no explanation of the problem-solving process)\nAssistant 2: 6 (The answer is incorrect, but the problem-solving process is mostly correct, with only one calculation mistake)\n\nIn conclusion, Assistant 2 performed better, but both assistants provided incorrect answers. The correct answer is f(2) = 39.", "score": [3, 6]}
+{"review_id": "gTq3TB2nWQHFaJ3QNPQGyD", "question_id": 69, "answer1_id": "C7CN5AYJhGC7iDgS7rciDQ", "answer2_id": "SququGaG2Y8euqGVwV2Ueb", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n3x + 10 = 5x - 10\n-2x = -20\nx = 10\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\n3x + 10 = 5x - 10x + 20 = 20 + 10x = 30x = 10\n\nAssistant 1's answer is incorrect. The steps are not clear and the algebraic manipulations are wrong.\n\nAssistant 2:\n3x + 10 = 5(x - 2)\nSimplifying the left side:\n3x + 10 = 5x - 10\nSubtracting 5x from both sides:\n-2x = -10\nDividing both sides by 2:\nx = -5\n\nAssistant 2's answer is also incorrect. The steps are clear, but there is a mistake in the last step. The correct division should be -2x = -20, not -2x = -10. Therefore, x = 10, not x = -5.\n\nScores:\nAssistant 1: 1/10 (Incorrect steps and answer)\nAssistant 2: 6/10 (Clear steps, but incorrect final answer)\n\nAssistant 2 performed better than Assistant 1, but both assistants provided incorrect answers.", "score": [1, 6]}
+{"review_id": "dyrkYetiqkC2MprrcNuzak", "question_id": 70, "answer1_id": "QP9CmZKwhu2BrUmzhy8tH4", "answer2_id": "9Zb7Fenxop3RQFUrxWr5Hn", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently:\n\nThe endpoints of the line segment are (2, -2) and (10, 4). To find the length of the line segment, we can use the distance formula:\n\ndistance = \u221a((x2 - x1)^2 + (y2 - y1)^2)\n\nPlugging in the coordinates:\n\ndistance = \u221a((10 - 2)^2 + (4 - (-2))^2)\ndistance = \u221a(8^2 + 6^2)\ndistance = \u221a(64 + 36)\ndistance = \u221a100\ndistance = 10\n\nNow, let's evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe answer provided by Assistant 1 is correct, but there is no explanation or problem-solving process provided.\n\nAssistant 2:\nThe answer provided by Assistant 2 is incorrect. The problem-solving process is also incorrect, as the assistant incorrectly calculated the lengths of the legs of the right triangle and used the wrong formula for the Pythagorean theorem.\n\nScores:\nAssistant 1: 7 (Correct answer, but no explanation)\nAssistant 2: 3 (Incorrect answer and incorrect problem-solving process)\n\nIn this case, Assistant 1 performed better, as they provided the correct answer, but neither assistant provided a complete and correct problem-solving process.", "score": [7, 3]}
+{"review_id": "iTn4PEys4cqA4kr8fh8E4D", "question_id": 71, "answer1_id": "2KtpJeRqEfbPLwdUxAXWrj", "answer2_id": "HkYsYvcMNHvt5suAZJpFWw", "reviewer_id": 1, "metadata": {}, "text": "7 10\nAssistant 1 provided general tips for writing a formal email, which were helpful but did not directly address the user's request for help in writing the email. Assistant 2, on the other hand, provided a complete and well-structured example of a formal email proposing a joint venture, which directly addressed the user's request. Assistant 2's response was more relevant, accurate, and detailed, making it the better overall response.", "score": [7.0, 10.0]}
+{"review_id": "Z2mq6MyWALagF9eEwjPCQF", "question_id": 72, "answer1_id": "gsL3R2grTXUiHdeDSk8f9Q", "answer2_id": "F45reLGER4jrAjZZi5r58x", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief and general guidance on how to start a resignation letter, while Assistant 2 went above and beyond by providing a complete and detailed sample resignation letter that the user can use as a guide. Assistant 2's response was more helpful, relevant, and detailed, which is why it received a higher score. Assistant 1's response was still helpful and accurate, but it lacked the level of detail that Assistant 2 provided.", "score": [8.0, 10.0]}
+{"review_id": "XdMaCf8saQsZaXX5JuMuRU", "question_id": 73, "answer1_id": "HprPMoUk9JY5TtToRbuMwy", "answer2_id": "ZLx4JuCtwNcv55nPbyXQvB", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief and general outline of a letter of recommendation, which is helpful but lacks the specific details and structure needed for a formal letter. Assistant 2, on the other hand, provided a complete and well-structured example of a formal letter of recommendation, tailored specifically for a student applying to a prestigious graduate program in computer science. The response from Assistant 2 is more detailed, relevant, and accurate, making it the better response overall.", "score": [8.0, 10.0]}
+{"review_id": "Pg38g5yHr3iwcYFapMSZAY", "question_id": 74, "answer1_id": "YU7gDhmo4LDVMTEZZMRdBC", "answer2_id": "XPgHiaRwa5ykfKMZG7GQ6b", "reviewer_id": 1, "metadata": {}, "text": "0 9\n\nAssistant 1 provided no response at all, so it receives a score of 0. Assistant 2, on the other hand, provided a well-written and compelling product launch announcement email that addressed the user's request. The response was relevant, detailed, and included key features and benefits of the new software solution. It also mentioned a special introductory pricing and provided contact information for further inquiries. Therefore, Assistant 2 receives a score of 9.", "score": [0.0, 9.0]}
+{"review_id": "nZErZWtzQVNoGFfuiy7naD", "question_id": 75, "answer1_id": "2gNqU2XpsL6yqFn6DMRgHq", "answer2_id": "MEWrWkY8Y5CSBTCNkfo2nx", "reviewer_id": 1, "metadata": {}, "text": "4 10\nAssistant 1 provided a response that was not helpful, as it only asked for the customer's name without providing the requested draft of the apology email. Assistant 2, on the other hand, provided a well-written and relevant apology email that addressed the customer's issue, offered reassurance, and expressed gratitude for their patience and understanding. This response was accurate, detailed, and met the user's request, earning Assistant 2 a perfect score.", "score": [4.0, 10.0]}
+{"review_id": "NXqa5sSr5YzQWGZSfVkhBj", "question_id": 76, "answer1_id": "a4L6JNPuomkPLdWxJRQpod", "answer2_id": "f8Jmezvy4SwqgSVeWQqTzk", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief and accurate overview of the history and cultural significance of jazz, but it lacked the depth and detail that would be expected in a script for a YouTube video. Assistant 2, on the other hand, delivered a well-structured and engaging script that not only covered the history and cultural significance of jazz but also provided visual cues and a narrative that would be suitable for a YouTube video. Assistant 2's response was more detailed, engaging, and appropriate for the requested format, which is why it receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "GQxsgn7Ng8un48t9TkCS9r", "question_id": 77, "answer1_id": "eN9V2Wf2BNzWoJosqdFw6K", "answer2_id": "bZVCu7vDL4GGm5P8zN3xou", "reviewer_id": 1, "metadata": {}, "text": "3 9\nAssistant 1 failed to provide any content in response to the user's question, resulting in a low score of 3. Assistant 2, on the other hand, composed an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions as requested. The response was detailed, relevant, and accurate, earning a score of 9.", "score": [3.0, 9.0]}
+{"review_id": "7HQ8Jnm6CQqSxjmooSUGMs", "question_id": 78, "answer1_id": "iCzkbWTatKfuC6QVv59XgA", "answer2_id": "gYkH3Pit5J9gnJyXJtoHUG", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a detailed response, but the plot summary seemed to be a mix of \"Ready Player One\" and an original story, which led to confusion and inaccuracies. The review also lacked a captivating tone. Assistant 2, on the other hand, delivered a captivating and engaging review with a clear plot summary, character descriptions, and praise for the special effects. The tone and structure of Assistant 2's response were more in line with a movie review, making it the better of the two.", "score": [7.0, 9.0]}
+{"review_id": "CpRrEdVhRkxYU8jSTR5Rxb", "question_id": 79, "answer1_id": "GQcFyY8ayFT48DAP5NKNZf", "answer2_id": "dX4Pt6xQpstrrnZgRLSVnN", "reviewer_id": 1, "metadata": {}, "text": "4 9\nAssistant 1 provided a long list of points related to the topic but failed to structure them into a podcast script format, making it difficult to follow and not directly addressing the user's request. Assistant 2, on the other hand, provided a well-structured podcast script with clear segments and relevant discussion points, making it much more helpful and relevant to the user's request.", "score": [4.0, 9.0]}
+{"review_id": "Xi8itZwqioN5N6waeeXs3m", "question_id": 80, "answer1_id": "A6pauLMckn34otnfpeQRFi", "answer2_id": "jseJYZmd8fBnzbx2EN9oq9", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a general overview of the concert experience, mentioning the composers and the audience's reaction. However, the response lacked specific details about the pieces performed and the orchestra's interpretation. Assistant 2, on the other hand, offered a more detailed and engaging review, discussing specific pieces, the orchestra's performance, and the conductor's insights. This response created a more vivid picture of the concert experience, making it more helpful and informative for the reader.", "score": [8.0, 10.0]}
diff --git a/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_alpaca-13b_vicuna-7b.jsonl b/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_alpaca-13b_vicuna-7b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..4caeec27fa63ae72c87f83c5ecefbb69d5f0a566
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_alpaca-13b_vicuna-7b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "UsK7iwiG86NsqcUM69siFA", "question_id": 1, "answer1_id": "kEL9ifUHDeYuAXzevje2se", "answer2_id": "8haRLA69xGcHc5woRHTj2f", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about improving time management skills. Assistant 1's answer was concise and covered the main points, such as setting priorities, breaking tasks into smaller chunks, and avoiding multitasking. However, Assistant 2's answer was more detailed and organized, providing a step-by-step approach to improving time management skills. Assistant 2 also included additional tips, such as practicing mindfulness and seeking support, which were not mentioned by Assistant 1. Therefore, Assistant 2 receives a slightly higher score due to the greater level of detail and organization in the response.", "score": [8.0, 9.0]}
+{"review_id": "VCgLWM4mc5NHw6WgUrvSX6", "question_id": 2, "answer1_id": "VcF3NrWGXhhxLkDVurNrwq", "answer2_id": "j7DV4tZTXhmRFj9zL5TSwq", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective ways to deal with stress. Assistant 1's answer was concise and covered the main points, such as identifying stress sources, developing coping mechanisms, and engaging in relaxation techniques. However, Assistant 2's answer provided a more detailed and structured list of specific strategies, such as exercise, time management, and social support, which made the answer more comprehensive and actionable. Therefore, Assistant 2 receives a slightly higher score.", "score": [9.0, 10.0]}
+{"review_id": "NeLiT8PtDKGvY2paP6uvCH", "question_id": 3, "answer1_id": "LpvtyQi9QdSgRrgGDxiGrT", "answer2_id": "fEq2fYvNcpaENuQFRMFdan", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. However, Assistant 2's answer was more detailed and organized, making it easier to understand the differences between Python and JavaScript. Assistant 1's answer contained a couple of inaccuracies, such as stating that Python is a compiled language (it is an interpreted language) and that JavaScript is not object-oriented (it is prototype-based, which is a form of object-oriented programming). Assistant 2's answer was more comprehensive and provided a clearer comparison between the two programming languages.", "score": [8.0, 9.0]}
+{"review_id": "REkuVdUakYuPqUBiqFWkkE", "question_id": 4, "answer1_id": "7zQm8cSTJhPtPdZdxbcfrX", "answer2_id": "VYKmdXnESAKV3FRutDUPWU", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about increasing productivity while working from home. They both included a list of tips, which were detailed and covered various aspects of working from home, such as setting a schedule, creating a designated workspace, taking breaks, and staying connected with colleagues. Both answers were well-organized and easy to understand. It is difficult to choose one over the other, as both responses were of high quality and provided valuable information. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "a9kGXwdV8zyCy9zAvT4Awa", "question_id": 5, "answer1_id": "UrLEH82RHwqqLt2LyvYSKj", "answer2_id": "maL9a3rivWyWZk3UgwQTVR", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about the basics of quantum computing. They both explained the concept of qubits, the difference between classical and quantum computing, and the potential applications of quantum computing. Assistant 1 mentioned the potential for revolutionizing the computing industry and artificial intelligence, while Assistant 2 discussed the principles of superposition and entanglement in more detail. Both answers were well-rounded and informative, so they both receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "huP3KBxsciqLwwH66dQPVS", "question_id": 6, "answer1_id": "fpRdMTdnfirosQixuf2Gez", "answer2_id": "aGRf8RjpUgneLvw4Uf93do", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was concise and touched on the main differences between plant-based and animal-based protein sources, including nutrient composition, resource usage, and environmental impact. However, Assistant 2's answer was more detailed and provided examples of specific plant-based and animal-based protein sources, as well as a more in-depth explanation of the differences in nutrient composition and sustainability. Therefore, Assistant 2 receives a slightly higher score due to the additional details and examples provided.", "score": [8.0, 9.0]}
+{"review_id": "6Y8JjRFdRvQPgQQaHRQduh", "question_id": 7, "answer1_id": "PvGmsCJSNFcvQKmPTnnd7s", "answer2_id": "oXtzronC4mdVKH9J59ofij", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about developing critical thinking skills. Assistant 1 gave a brief overview of the process, mentioning questioning assumptions, taking multiple perspectives, analyzing information, and coming to logical conclusions. This answer is accurate and relevant but lacks detail.\n\nAssistant 2, on the other hand, provided a more detailed and comprehensive response, listing specific strategies for improving critical thinking skills, such as active listening, asking questions, analyzing assumptions, evaluating evidence, thinking critically, seeking diverse perspectives, reflecting on one's thinking, and practicing mindfulness. This answer not only covers the main aspects mentioned by Assistant 1 but also expands on them with practical steps, making it more helpful and informative for the user. Therefore, Assistant 2 receives a higher score of 10, while Assistant 1 receives an 8.", "score": [8.0, 10.0]}
+{"review_id": "K4rxGsDQJstTkon9MB6yFm", "question_id": 8, "answer1_id": "n4ANAbpR3gvLPP8poPfKZ6", "answer2_id": "dE5c99j9hW9qDvjjPxUPzc", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question, addressing the major challenges faced by the education sector today. Assistant 1's answer was concise and covered a range of challenges, including access to quality education, low educational outcomes, high costs, inadequate infrastructure, overcrowding, gender inequality, and outdated curriculum. Assistant 2's answer was more detailed and organized, presenting the challenges in a numbered list format, which made it easier to follow. Assistant 2 also touched on additional challenges such as teacher and student evaluation, and international comparisons and rankings. While both answers were accurate and informative, Assistant 2's response was slightly more comprehensive and well-structured, earning a higher score.", "score": [8.0, 9.0]}
+{"review_id": "BpKR3kT2vwrCWLggmr8nAB", "question_id": 9, "answer1_id": "STJ36GrgQMcaUi7zaoNPit", "answer2_id": "oLRzkYUv8ooSJJLqfPnrxd", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was concise and covered the main factors influencing consumer behavior, such as cultural, social, and personal factors, and briefly mentioned advertising, marketing, and price. However, Assistant 2's answer was more detailed and comprehensive, listing ten factors that influence consumer behavior and providing a brief explanation for each. This level of detail makes Assistant 2's answer more informative and useful for the user, resulting in a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "R47Vyov9qiqhnNC5VbBo7m", "question_id": 10, "answer1_id": "425SwYvqKPAXFGTYKXB7Cs", "answer2_id": "hi7Gu2XPwcThie58TvvkK8", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective strategies for conflict resolution in the workplace. Both answers included similar strategies, such as active listening, open communication, focusing on interests, and finding mutually beneficial solutions. Assistant 1's answer was more concise, while Assistant 2's answer provided a few additional strategies, such as addressing power imbalances and providing training. Both answers were well-structured and easy to understand, making it difficult to choose one over the other. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "o4jdDgAFMXSGbhhaB2Vwsx", "question_id": 11, "answer1_id": "VbNAuj6KAkMdLJQXMo22oK", "answer2_id": "Xx5PB6u9sBagzxtB2YUKq8", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question. They both addressed the environmental and human health implications of using single-use plastic bottles versus reusable bottles. Assistant 1 mentioned the cost-effectiveness of reusable bottles, while Assistant 2 expanded on the economic and social impacts. Both answers complement each other and provide a comprehensive understanding of the topic. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "6JwS9DvXTRUXHWkKUrsdhU", "question_id": 12, "answer1_id": "CNGqAeu2QJbQ4QGzHJDPdq", "answer2_id": "FfaUTMS95MuGQQRDefvVzj", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer covered several important factors, such as frequency and reliability of service, accessible stops and vehicles, fare options, signage, seating, and integration with other services. However, Assistant 2's answer was more comprehensive and detailed, discussing physical accessibility, communication accessibility, route and schedule accessibility, service animals and assistive devices, dissemination of information, training and education, and continuous improvement. Assistant 2 also provided a more structured response, which made it easier to understand and follow. Therefore, Assistant 1 receives an 8, and Assistant 2 receives a 9.", "score": [8.0, 9.0]}
+{"review_id": "BFtRHU3kQfqCFtUQdMTSeN", "question_id": 13, "answer1_id": "E8w2qYqnm8iqCrSkUv62sz", "answer2_id": "WgCpMqMPUb9TU8jCuiExg3", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief and general overview of fiscal and monetary policies, while Assistant 2 provided a more detailed and comprehensive answer, covering specific strategies and examples of how governments can utilize these policies to combat economic recessions. Assistant 2's answer was more helpful and informative, which is why it received a higher score.", "score": [8.0, 10.0]}
+{"review_id": "kdbsHhZJuSftv5ErfKUStc", "question_id": 14, "answer1_id": "8o5yMymfzo6kzmp9GK5MWr", "answer2_id": "ATkPcXKbAki2VCoopjq6c3", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 gave a general overview of the issue and suggested ways to overcome the barriers. However, Assistant 2 provided a more detailed and comprehensive response, discussing specific examples of how language and cultural barriers can affect communication and relationships in multicultural societies. Assistant 2 also addressed additional factors such as stereotypes, prejudice, discrimination, and power dynamics, which contributed to a more thorough understanding of the topic.", "score": [8.0, 10.0]}
+{"review_id": "hyGom4nXXwsXpZxFBRMKhV", "question_id": 15, "answer1_id": "kbJVEEsdsSScEq5Y5furr7", "answer2_id": "TFh5bXFdG4fdK5hmq6qS6o", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer covered a broader range of AI applications in healthcare, such as automating administrative tasks, identifying drug interactions, and providing virtual health coaching. However, Assistant 2's answer provided a more detailed explanation of the scenarios, focusing on disease diagnosis, treatment planning, and predictive analytics. Assistant 2 also mentioned the importance of patient privacy and security in AI implementation. While both answers were informative, Assistant 2's response was slightly more detailed and structured, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "jkRiqLivb83X2rYej3BZ2x", "question_id": 16, "answer1_id": "CMUL5ULZuR7YC5EPzCBN2N", "answer2_id": "XDV7jFB36qKGzPXPcwvbQy", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief overview of the CRISPR-Cas9 technology, its potential applications, and ethical concerns. However, Assistant 2 provided a more detailed response, elaborating on the process of gene editing, listing more potential applications, and discussing a wider range of ethical implications. This additional detail and depth make Assistant 2's answer slightly better, resulting in a higher score.", "score": [8.0, 9.0]}
+{"review_id": "BiTEhyMU3gJtxeJbBpYpuw", "question_id": 17, "answer1_id": "kEmDDQyNqSkyFihYEEBpuR", "answer2_id": "6E3YAfxqckwL83dVo6ZRP4", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief explanation of how vaccinations work and the concept of herd immunity. However, Assistant 2 provided a more detailed explanation of how vaccines work by introducing a small, harmless piece of a virus or bacteria to the body, and how this triggers the immune system to produce antibodies. Assistant 2 also gave a clear example of measles vaccination and its impact on community protection. Additionally, Assistant 2 mentioned the importance of herd immunity thresholds and the time it takes for the immune system to develop sufficient antibodies. Due to the higher level of detail and clarity in Assistant 2's answer, it receives a higher score of 10, while Assistant 1 receives a score of 8.", "score": [8.0, 10.0]}
+{"review_id": "TAPGFXuvojjxMwWrWaBD4D", "question_id": 18, "answer1_id": "Qs3grQsqFVGK9EVkCkf9PB", "answer2_id": "FjSXpLx6FfHU8zN9mb8ucX", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was concise and touched on the main points of how social media platforms influence news consumption and the spread of misinformation. However, Assistant 2's answer provided a more detailed response, discussing both positive and negative implications of social media on news consumption, as well as mentioning specific measures taken by platforms to combat misinformation. This additional information and balance make Assistant 2's answer slightly better, resulting in a higher score.", "score": [8.0, 9.0]}
+{"review_id": "M3UAo7TkRwstSFDkzBGGaZ", "question_id": 19, "answer1_id": "kzZ6dKN7hkRWjqtdHr7Qns", "answer2_id": "HJczP2JdM4s6cxZyBWVyNZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 briefly mentioned the influence of cultural, social, and economic factors on food choices and how understanding these factors can help promote healthier diets. Assistant 2, however, provided a more detailed response, elaborating on specific examples of cultural, social, and economic factors and suggesting various strategies to promote healthier diets. This additional detail and depth in Assistant 2's answer resulted in a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "ULcS8edpDB5hS2SKjGRmhe", "question_id": 20, "answer1_id": "DPPDG6YGFJij2GCmRL66PU", "answer2_id": "X2EFE34dc5vtFSTGr3n2Bg", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. However, Assistant 2's answer was more detailed and comprehensive, explaining the steps involved in the process of natural selection and how it leads to the evolution and adaptation of species. Assistant 1's answer was more concise but still provided a good overview of the concept. Therefore, Assistant 1 receives an 8, while Assistant 2 receives a 10 for their more in-depth explanation.", "score": [8.0, 10.0]}
+{"review_id": "h2e6Skq5AUEWSBtGD8wJ9A", "question_id": 21, "answer1_id": "D62FjDb4nZANzPpfSfsiyn", "answer2_id": "iJjKWygtpHaLJirgK2PACK", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provides a general description of a knight's qualities but does not specifically address how to introduce oneself at a royal banquet. The answer is relevant but lacks detail and precision. Assistant 2's answer, on the other hand, is more detailed and directly addresses the question by providing a specific example of how a knight would introduce themselves at a royal banquet. The answer also touches on the importance of appearance and proper etiquette, which adds to its relevance and accuracy. Therefore, Assistant 2's answer is more helpful and precise, earning a higher score.", "score": [7.0, 9.0]}
+{"review_id": "A6pRdX4Y8ch3rnpD9FYovw", "question_id": 22, "answer1_id": "k7E4NNw5kyj9DmvP5Pu2zb", "answer2_id": "FRZfkefyPRGCSpLEexQoRH", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is relevant and captures the spirit of a pirate captain's speech, but it is brief and lacks the motivational aspect that the question asks for. Assistant 2's answer, on the other hand, is more detailed and directly addresses the motivational aspect, providing a clear reason for the crew to search for the hidden treasure. Assistant 2 also uses more pirate-specific language, which adds to the authenticity of the response. Therefore, Assistant 2 receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "j5fumDimASiLZ9HB4STbER", "question_id": 23, "answer1_id": "KFocjVCejYrU3YmLjAqoUF", "answer2_id": "HBah6W9KuR8eNpRQJUxVvd", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the user's question. Assistant 1's answer was more general, describing the approach and emotions a Shakespearean character would use in a soliloquy to declare their love. Assistant 2 went a step further by providing an actual soliloquy in the style of Shakespeare, which demonstrates a higher level of detail and accuracy in response to the question. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "9ZVNoY62bmLuj9LwVEqN4R", "question_id": 24, "answer1_id": "dq8Sm9djS7e7y9sG9vmMJf", "answer2_id": "3Rgw9vMLyMiwazfdjhWcgT", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was more focused on the emotional aspect of the origin story, mentioning fear, challenges, and support from friends and family. Assistant 2's answer was more detailed in terms of the superhero's powers and their dedication to protecting the innocent and fighting for justice. While both answers were engaging and appropriate for a curious child, Assistant 2's answer provided a slightly higher level of detail and a more vivid description of the superhero's abilities, which is why it receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "Qqjum7CnD5BUVN7hgLRJvb", "question_id": 25, "answer1_id": "XZ8fG8e6u7CyKd2moK6abe", "answer2_id": "b4oghpgzhWMdoryzQrSwuF", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information about technological advancements from the perspective of a time traveler from the year 3000. However, Assistant 2's answer was more detailed and comprehensive, covering a wider range of advancements and providing specific examples in each area. Assistant 1's answer was more general and less detailed, but still provided a good overview of the advancements. Therefore, Assistant 1 receives an 8, while Assistant 2 receives a 10 for their performance.", "score": [8.0, 10.0]}
+{"review_id": "Bze8iyXpRYhDiaV8sRsX3X", "question_id": 26, "answer1_id": "oKaXHfoK4pXwrefFWXmeA8", "answer2_id": "GzzDrjBAe3BnXWgWrATxJL", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and straightforward answer, describing the winning play as a layup made by the home team's star player at the buzzer. While the response was relevant and accurate, it lacked the level of detail and excitement expected from a sports commentator. Assistant 2, on the other hand, delivered a more engaging and detailed response, capturing the excitement and tension of the final moments of the game. The answer included a description of the opposing team's star player's actions, the successful three-point shot, and the crowd's reaction, which made it more immersive and true to the sports commentator style. Therefore, Assistant 2 receives a higher score for providing a more detailed and engaging response.", "score": [7.0, 9.0]}
+{"review_id": "DLZjgTfUxEVNxPSNje2YdV", "question_id": 27, "answer1_id": "ZwiZfvDWm7SETKNBfDk7Mb", "answer2_id": "f7hUYhajUbXNs3gQrG9z3b", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. However, Assistant 2's response was more detailed and descriptive, which made it slightly better. Assistant 1's answer was good, but it lacked the depth and specific details that Assistant 2 provided, such as the balance of sweet, sour, salty, and umami flavors, and the focus on locally-sourced, seasonal ingredients. Both assistants effectively conveyed the idea of a signature dish that combines traditional and modern cooking techniques, but Assistant 2's response painted a more vivid picture of the dish and its qualities.", "score": [8.0, 9.0]}
+{"review_id": "J5oMKGgubhYxoNoq7UVXTR", "question_id": 28, "answer1_id": "DxYopRe2LcTJMy3FWu6btd", "answer2_id": "LpUrrJuQ4cA6LtNRmiTfRv", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1's answer was concise and captured the emotions and the view from the summit of Mount Everest. However, Assistant 2's answer was more detailed, providing a more vivid description of the surrounding landscape and the emotions a climber might experience. Assistant 2 also acknowledged the potential risks and challenges associated with the climb, which added depth to the response. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "cEaAMBsLeuBPAx7BWH5vnZ", "question_id": 29, "answer1_id": "WC3UJVh4jQ5RUkpcRMU98L", "answer2_id": "Yiup49xrP6jf9nsEuGHgdN", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief overview of the challenges faced by a space colonist on Mars, touching on temperature fluctuations, limited resources, and physical and mental health. However, Assistant 2 provided a more detailed response, listing specific challenges such as radiation exposure, lack of oxygen, water scarcity, psychological challenges, and physical challenges. Assistant 2 also discussed the daily life of a space colonist, focusing on the need for adaptability, resourcefulness, and collaboration. While both answers were informative, Assistant 2's response was more comprehensive and detailed, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "Y4C9ettDbb4QVNJXXNtLwX", "question_id": 30, "answer1_id": "gTvgn6ksDjGGgdprw6AG5A", "answer2_id": "7cWm5Kop6bLzwLgJjpAVrK", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the user's question. Assistant 1's answer was concise and focused on the formation of an alliance with other survivors, sharing knowledge and skills, and working together to survive. Assistant 2's answer was more detailed and explored various aspects of survival, such as the importance of adaptability, resourcefulness, and the potential threats from other survivors. Assistant 2 also mentioned the natural world as an ally and the importance of personal skills and knowledge. Both answers were accurate and relevant, but Assistant 2's answer provided a more comprehensive view of survival in a post-apocalyptic world, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "4uajiwdpWxtQb6ih4oGZtC", "question_id": 31, "answer1_id": "3q7giCk2BA3Ye4Tm9HC2iw", "answer2_id": "YaUHhigGUvgv82Js3ktFgs", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer was mostly helpful and relevant, but there was a mistake in the last part of the response, stating that higher prices could indicate popularity among locals, which is not necessarily true. Assistant 2's answer was more comprehensive, providing clear indicators to determine if a restaurant is popular among locals or tourists, and explaining why this information might be useful. Assistant 2's response was more detailed and accurate, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "bwuQWmygvjGBXaMr27sRdV", "question_id": 32, "answer1_id": "hRGsxy86v26SC4yAQS29X4", "answer2_id": "LaHQYWhmXF7mnPSVFdhCeq", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provided a few clues to identify someone pretending to understand a topic, but the answer was relatively brief and lacked depth. Assistant 2's answer, on the other hand, provided a more detailed and comprehensive list of clues, with clear examples and explanations. Additionally, Assistant 2 mentioned the importance of approaching conversations with an open mind and clarifying unclear points, which added value to the response.", "score": [7.0, 9.0]}
+{"review_id": "WzkwmyxNAKJUtymVJ82oHL", "question_id": 33, "answer1_id": "3n49A5ggJERfXYrLns3ZeU", "answer2_id": "Br2uFCYmRUaQULwKzpffz9", "reviewer_id": 1, "metadata": {}, "text": "7 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. However, Assistant 2's answer was more comprehensive and detailed, covering a wider range of reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app. Assistant 1's answer was accurate and touched on some important points, but it was less extensive and detailed compared to Assistant 2's response. Therefore, Assistant 1 receives a score of 7, while Assistant 2 receives a score of 9.", "score": [7.0, 9.0]}
+{"review_id": "M7X2VHtbqosqu7KtDb9pHD", "question_id": 34, "answer1_id": "ErCpFtPuYVru4oTTk4WrxG", "answer2_id": "FCRqJu6DgRvCNq4Z2NneHf", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on body language and facial expressions, which are important indicators of genuine interest. However, Assistant 2 provided a more comprehensive answer, covering a wider range of indicators such as active listening, engagement, inclusivity, respect and empathy, and authenticity. Assistant 2 also acknowledged the complexity of human behavior and the possibility of a person being both polite and genuinely interested in a conversation. Overall, Assistant 2's answer was more detailed and nuanced, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "BFLaTrYPLHxkRwy5dBZrxj", "question_id": 35, "answer1_id": "PTNoCRMZWoJk8HaKX7fW45", "answer2_id": "Fy5Nw8LcWpdq2GokTbiwuq", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a concise response that covered the main points, such as supporting the local community, personal experience, higher quality products, and maintaining a unique local culture. However, Assistant 2 provided a more detailed and comprehensive answer, listing several reasons and elaborating on each one. This made Assistant 2's response more informative and useful for the user. While both answers were of good quality, Assistant 2's answer was slightly better due to the increased level of detail and organization.", "score": [8.0, 9.0]}
+{"review_id": "hrTtsvTjNDZQViJ6TPpZTe", "question_id": 36, "answer1_id": "n8cFs9KENNwZ4z3SR4iXTr", "answer2_id": "hKhcnEtSjzysU7sbtE3JeH", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1's answer was concise and covered some key points, such as checking the author's credentials, source of information, evidence, and reading other people's reviews. However, Assistant 2's answer was more detailed and provided additional tips, such as checking the publication's reputation, looking for fact-checking and corrections, considering the purpose and bias of the publication, and checking the date and relevance of the information. Assistant 2 also mentioned using alternative sources to compare and contrast information, which is a valuable tip for assessing credibility. Overall, Assistant 2's answer was more comprehensive and provided a better understanding of the topic, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "PopQxcPMA5cf7uzzhRX4A9", "question_id": 37, "answer1_id": "GzxL9mmEK5RzKqRbqBMUVC", "answer2_id": "cAVZTw5QY8WUnJEd3rUu3p", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was concise and touched on the main points of why some people enjoy being scared and why others avoid it. However, Assistant 2's answer was more detailed, discussing factors that influence a person's enjoyment of being scared, such as personality traits, past experiences, and coping mechanisms. Additionally, Assistant 2 mentioned the release of endorphins as a possible reason for the enjoyment of being scared. This additional information and depth make Assistant 2's answer slightly better than Assistant 1's.", "score": [8.0, 9.0]}
+{"review_id": "NYzhiPQT6kKnsMyz49aytZ", "question_id": 38, "answer1_id": "QpoHFgb9SzwuaXQQUuBUQD", "answer2_id": "9JxYGUzSa2rR68BTVuZJEA", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer focused on general aspects of observing behavior, such as interaction, addressing each other, handling disagreements, and problem-solving, as well as nonverbal cues. Assistant 2's answer went into more detail, providing specific examples of cultural norms and expectations that can be observed in social situations, such as body language, gender roles, interpersonal distance, table manners, and gift-giving. While both answers were informative, Assistant 2's response was more detailed and provided a broader range of examples, which is why it received a slightly higher score.", "score": [9.0, 10.0]}
+{"review_id": "MkmsogcgNnAWZHAbgZUYYy", "question_id": 39, "answer1_id": "Fxe6MS4GpP3LMDUwzY2cPA", "answer2_id": "5b3qKj2jtzgcZX7SSipQBW", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question, discussing the complexity and subjectivity of the issue. Assistant 1 briefly touched upon the main arguments for both sides, while Assistant 2 provided a more detailed explanation of the benefits and drawbacks of space exploration and addressing Earth's problems. Assistant 2 also delved deeper into the moral perspective and the factors that may influence individual decisions. Therefore, Assistant 2 receives a slightly higher score due to the increased level of detail and depth in the response.", "score": [8.0, 9.0]}
+{"review_id": "cFBDfxZUPFrKuHFzAZqxEy", "question_id": 40, "answer1_id": "mJiQ2FGR4Xb8kmhZjharkw", "answer2_id": "hG6i3Q8F7bA7u73vo6t2XZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 emphasized the importance of striking a balance between job creation and technological progress, suggesting policies and initiatives that promote both. Assistant 2 also acknowledged the importance of both factors and discussed the potential benefits of automation, while stressing the need for a balanced approach. Assistant 2's answer was slightly more detailed, as it mentioned specific fields where new jobs could be created due to automation, and thus receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "eGEvDMQbMoGQBT5UrFhjGA", "question_id": 41, "answer1_id": "6Kph4RHRKEZ4YUoaHuEhBv", "answer2_id": "KaZUZ4NAnQ3SY6mq2QY3Kt", "reviewer_id": 1, "metadata": {}, "text": "7 4\nAssistant 1 provided a more helpful and relevant answer, with a clear explanation of the average number of blinks per minute, per day, and in a lifetime. The answer also included the purpose of blinking, which added value to the response. Assistant 2's answer was less accurate and contained some errors in calculations, leading to an incorrect final number of blinks in a lifetime. Additionally, Assistant 2's response was more difficult to follow due to the convoluted calculations and unnecessary steps.", "score": [7.0, 4.0]}
+{"review_id": "YiLdzx8nqDZtUYvt48UwsC", "question_id": 42, "answer1_id": "WBwpBQwhxn5kxLDb7MschC", "answer2_id": "Jwrdb34BnrswtwbeSd6xxP", "reviewer_id": 1, "metadata": {}, "text": "3 6\nAssistant 1's answer is incorrect and confusing, as it provides an arbitrary number of atoms in a grain of salt and uses incorrect atomic weight. Assistant 2's answer is more relevant and provides a better explanation, but it still does not directly answer the question about the number of atoms in a grain of salt. Instead, it focuses on the weight of a grain of salt. Both answers could be improved by providing a clearer explanation and directly addressing the number of atoms in a grain of salt.", "score": [3.0, 6.0]}
+{"review_id": "aKnEQapou8ic9AWXBanAUM", "question_id": 43, "answer1_id": "kf8nahQVci2ZLaYikagB7U", "answer2_id": "Ubz2V2EhkWmNYPEErdX8gn", "reviewer_id": 1, "metadata": {}, "text": "6 9\nAssistant 1's answer is incorrect, stating that there are approximately 2000 lightning strikes per day, which is a significant underestimation. The explanation provided is more focused on the process of lightning formation rather than the frequency of lightning strikes. Assistant 2's answer is more accurate, estimating around 1.6 million lightning strikes per day, and provides a more detailed explanation of how the number of lightning strikes is determined, as well as the factors that can affect the frequency of lightning strikes. Assistant 2 also discusses the importance of lightning in the Earth's ecosystem, adding more depth to the answer.", "score": [6.0, 9.0]}
+{"review_id": "PJnWKBaU6TYiDX4h2YDTVM", "question_id": 44, "answer1_id": "Gptgryd4o2dC8V5aqRmeJJ", "answer2_id": "CufihrnQT3GRic3Vr2rfU2", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's answer starts with a specific number of balloons (10 million) but does not provide a clear explanation of how this number was derived. The calculations mentioned are confusing and seem to be incorrect. Assistant 2's answer, on the other hand, provides a more detailed and step-by-step explanation of the calculations and assumptions made to estimate the number of balloons needed to lift a house. Although the final number (1.4 x 10^14) seems unrealistic, the approach and reasoning are more thorough and clear compared to Assistant 1's answer. Assistant 2's answer is more helpful, relevant, and accurate, and provides a better level of detail.", "score": [6.0, 8.0]}
+{"review_id": "396xB8bjy5CdAyFfMHCRr2", "question_id": 45, "answer1_id": "RfBWW8ZhdfTuTMb454Un4o", "answer2_id": "fBBFkmgfGSiPtQtBdsVxpZ", "reviewer_id": 1, "metadata": {}, "text": "7 8.5\nAssistant 1 provided an answer with specific numbers, but the source of the information was not mentioned, which makes it difficult to verify the accuracy of the data. The explanation about the increase in smartphones and network coverage was relevant, but it did not provide a step-by-step reasoning for the number of text messages sent per minute. Assistant 2, on the other hand, acknowledged the difficulty in determining the exact number and provided an estimate based on daily text messages sent. The explanation was more detailed, covering various factors contributing to the high number of text messages sent worldwide. Assistant 2's answer was more helpful and relevant, as it provided a better understanding of the factors influencing the number of text messages sent per minute.", "score": [7.0, 8.5]}
+{"review_id": "bqujgMYbHdG7vrkDfUMc9w", "question_id": 46, "answer1_id": "neGgLYm47JvqN8qkw8VeoW", "answer2_id": "eQmVTyJQmoBKrZ9J38EXBC", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provides a rough estimate of 1 billion words spoken daily on Earth, but it lacks a clear explanation of how this number was derived. The answer mainly focuses on the importance of language in human communication, which, while relevant, does not directly address the question. Therefore, I give Assistant 1 a score of 7.\n\nAssistant 2's answer, on the other hand, provides a more detailed and step-by-step explanation of the reasoning behind the estimate. It considers the number of languages spoken, the world population, and the average number of words spoken per person per day. The answer acknowledges the difficulty in providing a precise estimate and explains the factors that can cause variations in the number of words spoken. Assistant 2's answer is more helpful, relevant, and detailed, so I give it a score of 9.", "score": [7.0, 9.0]}
+{"review_id": "iWFTc3ySpMaPa5D5tpdfTq", "question_id": 47, "answer1_id": "KSePbzLwsYasR3aui4HU8h", "answer2_id": "4JYtxzTN9oUTkVrFy7CDe6", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a general answer with an average of 100 snowflakes falling per second, but this number seems arbitrary and lacks a source or explanation. The answer does explain the formation of snowflakes and the factors that affect their shape, size, and number. However, it does not provide a step-by-step reasoning for estimating the number of snowflakes during a typical winter.\n\nAssistant 2, on the other hand, acknowledges the difficulty in estimating the number of snowflakes and explains the factors that affect snowflake formation and the number of snowflakes that fall. The answer provides a more detailed explanation of the process of snowfall and the factors that influence it. It also gives a general estimate of 10 to 20 snowflakes falling per square meter per hour, which seems more reasonable and is based on location and weather conditions. The answer concludes by reiterating the difficulty in providing an accurate estimate and the factors that contribute to the variation in the number of snowflakes.", "score": [7.0, 9.0]}
+{"review_id": "2M6EZPAPgMpfzkHdMvto3y", "question_id": 48, "answer1_id": "SWWfLpP2gnWHLbYRZXNsW3", "answer2_id": "M7KhQtwQrueKKwuemeJHUb", "reviewer_id": 1, "metadata": {}, "text": "7 6\nAssistant 1 provided a more detailed answer with an estimated number of pages in all the books ever written, which is 100 billion pages, based on the assumption of 130 million unique titles and an average of 200 pages per book. The answer also touched on the importance of books and the shift to digital formats. Assistant 2, on the other hand, focused more on the difficulty of determining the exact number of pages and provided an estimate based on the number of books published each year in the United States. Both answers were relevant and accurate to some extent, but Assistant 1's answer was more helpful and provided a clearer estimate, which is why it received a higher score.", "score": [7.0, 6.0]}
+{"review_id": "BiNdX4TBAiKeKrKdLZP7Rr", "question_id": 49, "answer1_id": "WaBwKYhs7eAG22qCGLH2j3", "answer2_id": "AuuvYW4edaZy5wpL5QyppA", "reviewer_id": 1, "metadata": {}, "text": "5 3\nAssistant 1's answer provided a clear calculation based on the number of years since life began, but the answer of 4.3 billion times is incorrect, as the calculation should be 4.5 billion years multiplied by 1 (one orbit per year), resulting in 4.5 billion orbits. Assistant 2's answer was confusing and contained incorrect information, such as the Earth's distance from the Sun at the beginning of life and an incorrect number of orbits (200,000). The correct answer should be around 4.5 billion orbits, considering that life began approximately 4.5 billion years ago and the Earth orbits the Sun once per year.", "score": [5.0, 3.0]}
+{"review_id": "EZZ926TjqdGnAapCCw6goz", "question_id": 50, "answer1_id": "MfMJeE9om7qyBbqopHouf4", "answer2_id": "TJpkA2j3kC9akcHtmVQKUF", "reviewer_id": 1, "metadata": {}, "text": "7 8\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1 gave a conservative estimate of several hundred thousand songs recorded throughout history, while Assistant 2 estimated tens of thousands of songs in existence today, with hundreds of thousands more recorded throughout history. Both answers acknowledged the difficulty in determining the exact number of songs recorded and the impact of recording technology on the growth of music. However, Assistant 2 provided a more detailed explanation, discussing the evolution of musical traditions, the explosion of new songs due to widespread availability of recording technology, and the fact that many songs are re-recorded and covered by other artists. This additional information made Assistant 2's answer slightly more informative and comprehensive, resulting in a higher score.", "score": [7.0, 8.0]}
+{"review_id": "AfhxPNM7suXhwV7jZHtK9Q", "question_id": 51, "answer1_id": "TjWPRDM6JFpPF8xeRptCKb", "answer2_id": "9FJsZWNGMNuGKGF4YjQoRM", "reviewer_id": 1, "metadata": {}, "text": "7 8\nAssistant 1 provided a relevant and accurate response, but it was relatively brief and lacked some details. Assistant 2's answer was more detailed and provided a better context of the Renaissance period, discussing the limitations of technology at the time and how the Internet might have been used for communication and information sharing among the ruling classes and scholars. Both answers were helpful, but Assistant 2's response was more comprehensive and informative.", "score": [7.0, 8.0]}
+{"review_id": "45WbYeb6uCC9nf5pU8h7e2", "question_id": 52, "answer1_id": "iR2tYTsWTFENEP7Qy9RgtX", "answer2_id": "4vN4PKLUJkaYTEEVzYE3DN", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the potential preservation of Aztec culture and the impact on Native American populations in the region. Assistant 2, while also discussing the potential for the Aztec civilization to thrive, acknowledged the uncertainty of the outcome and considered the possibility of the Spanish eventually finding a way to defeat the Aztecs. Assistant 2's answer was more comprehensive, considering various factors and potential outcomes, which is why it received a slightly higher score. Both answers were accurate and provided a good level of detail, but Assistant 2's answer was more nuanced and balanced in its approach.", "score": [8.0, 9.0]}
+{"review_id": "G5PbRNUqqbUYPbAzfXeQaW", "question_id": 53, "answer1_id": "AZdS8xAi3GwAmCqkNSnnwv", "answer2_id": "LjnTtmsFEMgFEkYMKE3VRV", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provides a brief overview of the potential consequences of the Black Death not occurring in the 14th century, touching on aspects such as population growth, urbanization, and the Renaissance. However, the answer lacks depth and does not provide a clear explanation for some of the claims made, such as the connection between the Black Death and the Industrial Revolution.\n\nAssistant 2's answer, on the other hand, provides a more detailed and well-structured response. It begins by explaining what the Black Death was and its impact on Europe, which helps to contextualize the question. The answer then presents five possible scenarios that could have occurred if the Black Death had not taken place, covering economic, political, cultural, trade, and environmental aspects. This approach provides a more comprehensive understanding of the potential consequences of the Black Death not occurring.", "score": [7.0, 9.0]}
+{"review_id": "KqxXEcbCpVBetP3rsojSSE", "question_id": 54, "answer1_id": "VmwifF2JD5osYKDTqv2ZRS", "answer2_id": "9L9SgmcPjkFs3CwuiUJqm9", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant answer, but it was more speculative and lacked depth. The answer focused on possible discoveries Newton might have made in medicine and biology, but it didn't consider the broader implications of his work in physics and how it has shaped our understanding of the world. Assistant 2, on the other hand, provided a more comprehensive answer, acknowledging the uncertainty of the hypothetical situation and discussing the potential impact of Newton's work in biology on various fields. Assistant 2 also highlighted the importance of Newton's work in physics and its influence on our current understanding of the world, making the answer more informative and well-rounded.", "score": [7.0, 9.0]}
+{"review_id": "EYekoMW7bV6gwiZWntHECE", "question_id": 55, "answer1_id": "mUL5UPj3qDGaCriEjL2U3B", "answer2_id": "f9pwVc7bbzajLXKsaGsXQ8", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the potential impact on the music world, the British Invasion, and the cultural influence of the Beatles. Assistant 2, however, provided a more detailed response by listing several possible outcomes if the Beatles had never formed as a band. Assistant 2's answer covered a broader range of possibilities, including the music scene in Liverpool and London, the influence of their manager, and the potential impact on the music industry and cultural changes. While both answers were accurate and relevant, Assistant 2's answer was more comprehensive and detailed, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "8aUm8VzFWuJNGsFNqSnBWK", "question_id": 56, "answer1_id": "dVdwUoVrAQJDuWxiodykiw", "answer2_id": "UfZJcVaZLWkVsipvmDBrdd", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1's answer was concise and directly addressed the potential consequences of not cracking the Enigma code. However, Assistant 2's answer was more detailed and provided additional context about the Enigma code's importance and the challenges the Allies would have faced without it. Assistant 2 also acknowledged the difficulty in predicting the exact impact of Turing's work, which adds a level of nuance to the response. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "ZgDPUhaTP6er3RFfuBGpQj", "question_id": 57, "answer1_id": "EiNn9jjfy7dga6xfCtLtF8", "answer2_id": "K635PkbTDkJTEnDSG68bTh", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was concise and focused on the implications for international trade and navigation. Assistant 2's answer was more detailed, providing historical context and discussing the potential impact on economic growth and development. Both answers were informative, but Assistant 2's answer was slightly more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "jp4K9zD7yqoTbCWrLNyPDx", "question_id": 58, "answer1_id": "eqG9f2R9hXVyZrZMpcqAYq", "answer2_id": "aGEwGV2TQY6snjFdiLkyqZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1 focused on the potential growth and development of the Maya civilization if it had not collapsed, mentioning the construction of cities and monuments, advancements in writing and astronomy, and expansion of trade networks. Assistant 2, on the other hand, provided a more detailed background of the Maya civilization and discussed various possibilities of what might have happened if the collapse did not occur, including technological advancements, empire expansion, and potential instability. Assistant 2 also acknowledged the ongoing research and theories surrounding the collapse. While both answers were accurate and informative, Assistant 2's answer was slightly more detailed and comprehensive, which is why it receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "DsYRrxkybmnkNEBUmWbnvH", "question_id": 59, "answer1_id": "ex42CLEzDVC2TxAvMCtYQJ", "answer2_id": "ksnQVXY9SY2NzMxXEmV3Pd", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1's answer was concise and touched on the key points of the potential consequences of Columbus not discovering the Americas, such as the delay in European colonization and the possibility of other Europeans discovering the Americas. Assistant 2's answer, however, provided a more detailed and comprehensive response, discussing the potential impacts on indigenous cultures, global power structures, economic and social impacts on Europe, and the political and religious landscape. While both answers were helpful, Assistant 2's answer was more detailed and informative, earning it a higher score.", "score": [8.0, 9.0]}
+{"review_id": "XMyRQ5oFsTKegBH86qwejD", "question_id": 60, "answer1_id": "RfFANSwHeCm3Pqe2AJ86Dk", "answer2_id": "GZpcvXQyrKUHsgWpzS7Hpo", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the potential benefits of van Gogh's success, such as financial stability, travel, and networking opportunities. Assistant 2, on the other hand, delved deeper into the possible impact of his success on his art and the art world, mentioning his influences and contributions to the modernist movement. Assistant 2 also touched upon the possibility of van Gogh's mental health struggles continuing despite his success. Both answers were accurate and detailed, but Assistant 2's response provided a more comprehensive view of the potential outcomes and implications of van Gogh's success, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "Qq9kqnTqwZs5TiAK93RLF6", "question_id": 61, "answer1_id": "nzxdtWmgQhRkHeVAanVogZ", "answer2_id": "Czx3sfkw8C4md9YJrhs3gu", "reviewer_id": 2, "metadata": {}, "text": "4 8\n\nAssistant 1 provided a general overview of the problem-solving approach but did not provide any code or specific implementation details. Their answer was helpful in understanding the problem but lacked the necessary code to solve it.\n\nAssistant 2 provided a working C++ program that reads a text file line by line and counts the number of occurrences of a specific word in the file. The code is well-structured, readable, and includes helpful comments. However, there are a few issues:\n\n1. The program reads the first line of the file before entering the loop, which means it skips the first line when searching for the word. The `getline(file, line)` statement before the loop should be removed.\n2. The `word` variable is not initialized with a specific word to search for. The program should prompt the user to input the word they want to search for or hard-code the word in the code.\n3. The program does not account for multiple occurrences of the word in a single line. The `find` function should be used in a loop to search for all occurrences of the word in each line.\n\nHere's an improved version of the code:\n\n```cpp\n#include \n#include \n#include \n\nusing namespace std;\n\nint main() {\n // Open the file for reading\n ifstream file(\"filename.txt\");\n\n // Check if the file is open\n if (!file.is_open()) {\n cerr << \"Error opening file.\" << endl;\n return 1;\n }\n\n // Prompt the user to enter the word to search for\n string word;\n cout << \"Enter the word to search for: \";\n cin >> word;\n\n // Initialize the count variable\n int count = 0;\n\n // Loop through the file line by line\n string line;\n while (getline(file, line)) {\n // Check if the line contains the word\n size_t pos = line.find(word);\n while (pos != string::npos) {\n // If it does, increment the count\n count++;\n pos = line.find(word, pos + 1);\n }\n }\n\n // Close the file\n file.close();\n\n // Print the result\n cout << \"The word \" << word << \" appears \" << count << \" times in the file.\" << endl;\n\n return 0;\n}\n```\n\nThis version of the code addresses the issues mentioned above and should work correctly for counting the occurrences of a specific word in a text file.", "score": [4.0, 8.0]}
+{"review_id": "KVKQ3U2vZtSkiNpC7jEtyB", "question_id": 62, "answer1_id": "kRo6z5qmeiSM8Xz6BPJHLC", "answer2_id": "G7QjnFhVnsMPMN9UES8mTV", "reviewer_id": 2, "metadata": {}, "text": "5 3\n\nAssistant 1's code has a good structure and comments, but the logic for calculating the longest common subsequence is incorrect. The condition inside the loop is not correct, and the function returns the wrong result. The code should be updated to use the correct dynamic programming approach for finding the longest common subsequence.\n\nAssistant 2's code is not correct either. The logic for calculating the longest common subsequence is incorrect, and the code does not use dynamic programming. The function breaks out of the inner loop when the characters are not the same, which is not the correct approach. The code should be updated to use the correct dynamic programming approach for finding the longest common subsequence.\n\nHere is a correct implementation of the longest common subsequence function using dynamic programming:\n\n```python\ndef longest_common_subsequence(s1, s2):\n m, n = len(s1), len(s2)\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n\n return dp[m][n]\n```\n\nThis implementation correctly uses dynamic programming to find the longest common subsequence. The outer loop iterates over the first string, and the inner loop iterates over the second string. If the characters at the current positions are the same, the length of the LCS is updated to be one more than the LCS of the previous characters. If the characters are not the same, the length of the LCS is updated to be the maximum of the LCS lengths for the previous characters in either string. The function then returns the length of the longest common subsequence.", "score": [5.0, 3.0]}
+{"review_id": "cN2XYwzktsLjcGjp3LZ77M", "question_id": 63, "answer1_id": "7kj8odEE79UBdR4pT6u6TY", "answer2_id": "RHqehPQpeXkN9d6NHkVGxU", "reviewer_id": 2, "metadata": {}, "text": "7 9\n\nAssistant 1 provided a valid regular expression for email validation, but did not include any Python code or explanation on how to use the regular expression in a Python program. The regular expression itself is quite comprehensive and should work for most email addresses.\n\nAssistant 2 provided a complete Python implementation using the `re` module, including a regular expression, code to compile the regex, and examples of how to use the compiled regex to validate email addresses. The code is well-structured, readable, and includes helpful comments explaining the logic and functionality. The regular expression provided by Assistant 2 is simpler than Assistant 1's, but it should work for most common email addresses. However, it might not cover all possible email address formats.\n\nSuggestions for improvement:\n\nAssistant 1:\n- Include a complete Python implementation using the `re` module.\n- Provide examples of how to use the regular expression in a Python program.\n- Add comments to explain the logic and functionality of the regular expression.\n\nAssistant 2:\n- Consider using a more comprehensive regular expression, like the one provided by Assistant 1, to cover more possible email address formats.\n- The code is already well-structured and readable, but adding more comments to explain the regular expression itself would be helpful.", "score": [7.0, 9.0]}
+{"review_id": "5FSgVrf6EKPavFZndnYqL5", "question_id": 64, "answer1_id": "UULZ4GLz9j9ot7tN7x3ySD", "answer2_id": "aqYsaowQtr5rYU6iDbRj5x", "reviewer_id": 2, "metadata": {}, "text": "4 8\n\nAssistant 1's code is not provided in a proper format and the logic is incorrect. The explanation is confusing and does not follow the dynamic programming approach. The steps mentioned do not make sense in the context of finding the nth Fibonacci number. The code structure and readability are also poor.\n\nAssistant 2's code is much better. It correctly implements the dynamic programming approach to find the nth Fibonacci number. The code is accurate, efficient, and easy to understand. The comments are clear and concise, explaining the logic and functionality of the code. The code adheres to proper coding standards and best practices. However, the code can be further optimized by not storing all the Fibonacci numbers in a list and only keeping track of the last two numbers. Here's an improved version of Assistant 2's code:\n\n```python\ndef fibonacci(n):\n # Initialize two variables to store the last two Fibonacci numbers\n a, b = 0, 1\n # Loop through the numbers and update the last two Fibonacci numbers\n for _ in range(n):\n a, b = b, a + b\n # Return the nth Fibonacci number\n return a\n```\n\nThis version of the code is more efficient as it does not store all the Fibonacci numbers in a list and only keeps track of the last two numbers.", "score": [4.0, 8.0]}
+{"review_id": "KrjYea9VnaYgDMoHHFfJwT", "question_id": 65, "answer1_id": "Rh8P3bxSJx4p6nCZkSZBrh", "answer2_id": "fimg24Eg23yjtVd7pYfo8A", "reviewer_id": 2, "metadata": {}, "text": "8 10\n\nAssistant 1 provided a good explanation of how the binary search algorithm works, but did not include any code implementation. The answer is informative and accurate, but it does not fully address the question, which asked for an implementation of the algorithm.\n\nAssistant 2 provided a complete and correct implementation of the binary search algorithm in Python. The code is well-structured, efficient, and easy to read. The answer also includes an example of how to use the function, which is helpful for understanding its usage. The only minor improvement that could be made is to add comments within the code to explain the logic and functionality of each step. Overall, Assistant 2's answer is excellent and fully addresses the question.", "score": [8.0, 10.0]}
+{"review_id": "PUYAhQNW28PdTAgbBvdTXT", "question_id": 66, "answer1_id": "R3aYUUtY46ht4ToU8US96x", "answer2_id": "7bsZbZ9E9jjwgGhnTiTyWS", "reviewer_id": 2, "metadata": {}, "text": "5 7\n\nAssistant 1 provided a general explanation of how to implement a queue using two stacks, but did not provide any code. The explanation was clear, but it would have been more helpful to include a code example.\n\nAssistant 2 provided a code example, but the implementation is not correct. The dequeue method should transfer elements from stack1 to stack2 when stack2 is empty, and then pop the top element from stack2. The enqueue method should simply push the item onto stack1. The front and rear variables are not necessary in this implementation. Here's a corrected version of the code:\n\n```python\nclass Queue:\n def __init__(self):\n self.stack1 = []\n self.stack2 = []\n\n def enqueue(self, item):\n self.stack1.append(item)\n\n def dequeue(self):\n if not self.stack2:\n while self.stack1:\n self.stack2.append(self.stack1.pop())\n return self.stack2.pop() if self.stack2 else None\n```\n\nAssistant 2's code structure and readability were good, but the implementation was not accurate. It would be helpful to include comments explaining the logic and functionality of the code.", "score": [5.0, 7.0]}
+{"review_id": "6J8PdKNzM5qHU2ejHPwVWt", "question_id": 67, "answer1_id": "MQJFbH3KoFaSMmPgmK9LPa", "answer2_id": "TMYj69PfzLFm6S8ahKbg5J", "reviewer_id": 2, "metadata": {}, "text": "4 6\n\nAssistant 1:\nStrengths:\n- The answer provides a general idea of how to approach the problem using nested loops.\n\nWeaknesses:\n- The answer does not provide any code implementation.\n- The explanation is not clear and lacks detail.\n- The \"found\" flag concept is not well-explained and may not be the most efficient way to solve the problem.\n\nSuggestions for improvement:\n- Provide a code implementation to better demonstrate the solution.\n- Explain the logic and steps in more detail.\n- Consider alternative methods for finding common elements, such as sorting the arrays first and using two pointers to iterate through them.\n\nAssistant 2:\nStrengths:\n- The answer provides a step-by-step explanation of the solution.\n- The use of pointers is a more efficient approach than nested loops.\n\nWeaknesses:\n- The answer does not provide any code implementation.\n- The explanation is somewhat confusing, especially regarding the use of flags and pointers.\n- The solution does not account for duplicate elements in the arrays.\n\nSuggestions for improvement:\n- Provide a code implementation to better demonstrate the solution.\n- Clarify the explanation of the solution, especially regarding the use of flags and pointers.\n- Consider how to handle duplicate elements in the arrays, such as by sorting the arrays first and skipping over duplicates during the comparison process.", "score": [4.0, 6.0]}
+{"review_id": "SoYXkRh4Am4VsMMERWYMV5", "question_id": 68, "answer1_id": "dBWqryuzG6BHRdPgrWWpbH", "answer2_id": "NDXxGeVFruYsJ3NZ7zjJ3n", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow let's evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe value of f(2) is 1.\n\nThis answer is incorrect.\n\nAssistant 2:\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 10 - 2 + 3\nf(2) = 10 - 5 + 3\nf(2) = 2\n\nThis answer is also incorrect. The problem-solving steps are not accurate, and the calculations are wrong.\n\nBased on the evaluation, the scores for Assistant 1 and Assistant 2 are as follows:\n\nAssistant 1: 1 (incorrect answer and no problem-solving steps provided)\nAssistant 2: 3 (incorrect answer, but attempted problem-solving steps)\n\nThe tuple containing the scores is (1, 3). Assistant 2 performed better than Assistant 1, but both assistants provided incorrect answers.", "score": [1, 3]}
+{"review_id": "jcpkrmo6wacxzRH4czSEx2", "question_id": 69, "answer1_id": "W74byrCbE7XiJWjcPTZS7x", "answer2_id": "g3NBgumbbZBxnCMCPj7kof", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n3x + 10 = 5x - 10\n10 = 2x - 10\n20 = 2x\nx = 10\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nx = 6.\n\nAssistant 1 provided an incorrect answer without showing any steps.\n\nAssistant 2:\nAssistant 2's steps are confusing and incorrect. They made several mistakes in their calculations, and their final conclusion that the equation has no solution is incorrect.\n\nNow, I will provide the scores for Assistant 1 and Assistant 2:\n\nAssistant 1: 1/10 - The answer is incorrect, and there are no steps provided to evaluate their problem-solving process.\n\nAssistant 2: 2/10 - The answer is incorrect, and the steps provided are confusing and incorrect. However, they at least attempted to show their work, which is why they receive a slightly higher score than Assistant 1.\n\nIn conclusion, both assistants performed poorly, but Assistant 2 performed slightly better due to their attempt to show their work.", "score": [1, 2]}
+{"review_id": "RkCzNRqnhNVUWYzHpfC7zC", "question_id": 70, "answer1_id": "7WC5quGKdv8hADCxWaWZ57", "answer2_id": "jwp2XMxq8PTKTJiZCATHnG", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nTo find the length of a line segment with endpoints (x1, y1) and (x2, y2), we can use the distance formula:\n\nLength = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nIn this case, the endpoints are (2, -2) and (10, 4), so we have:\n\nLength = sqrt((10 - 2)^2 + (4 - (-2))^2)\nLength = sqrt(8^2 + 6^2)\nLength = sqrt(64 + 36)\nLength = sqrt(100)\nLength = 10 units\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe length of the line segment is 8 units.\n\nAssistant 1's answer is incorrect because they did not use the distance formula to find the length of the line segment.\n\nAssistant 2:\nLengt h = 10 - 2 = 8 units.\n\nAssistant 2's answer is also incorrect because they did not use the distance formula to find the length of the line segment. Instead, they only subtracted the x-coordinates of the endpoints.\n\nScores:\nAssistant 1: 3/10\nAssistant 2: 3/10\n\nBoth assistants performed poorly because they did not use the correct formula to find the length of the line segment. The correct answer is 10 units, not 8 units.", "score": [3, 3]}
+{"review_id": "cn34W5Bv26UibQbT6psBmm", "question_id": 71, "answer1_id": "AZER7D3RKZ9F9SXHiMjdqM", "answer2_id": "B6CrSifSJDEHUYiBaiHELR", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the user's question. They both offered templates for a formal email proposing a joint venture to a potential business partner. Assistant 1's answer was concise and to the point, while Assistant 2's answer was more detailed and provided more context, which is why Assistant 2 received a slightly higher score. Both templates are useful and can be easily adapted by the user for their specific needs.", "score": [9.0, 10.0]}
+{"review_id": "BFNp4EpwENRAEyR9wUZJA5", "question_id": 72, "answer1_id": "MSrdDafr77UvSHCnsPMSP3", "answer2_id": "932DTWDKnBbQDQYTW6be87", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth assistants provided helpful and relevant resignation letter templates. Assistant 1's answer was concise and accurate, but Assistant 2's answer provided a more detailed and comprehensive template, which included the sender's and employer's addresses, a more thorough expression of gratitude, and a mention of assisting in the transition process. This additional detail makes Assistant 2's response more useful for the user.", "score": [8.0, 10.0]}
+{"review_id": "KJH5GuwPrg6UzFqJh7YVGQ", "question_id": 73, "answer1_id": "hxkjUkDkXhGP78Vo74B4WE", "answer2_id": "jVEfncaU9LitqggtYBGuik", "reviewer_id": 1, "metadata": {}, "text": "9 10\nAssistant 1 provided a helpful and relevant response, outlining the general structure and content of a formal letter of recommendation. However, Assistant 2 went above and beyond by providing a detailed and well-structured example of a formal letter of recommendation, which is more useful for someone looking to write such a letter. Both assistants provided accurate information, but Assistant 2's response was more detailed and directly applicable to the user's question.", "score": [9.0, 10.0]}
+{"review_id": "dEvy77EWYPpwGkt5PkkZV4", "question_id": 74, "answer1_id": "hv9jpG9PxeJ9AnFdNzH3Jv", "answer2_id": "NQSftH48RoRxTKLCqNbPtt", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both included essential elements of a product launch announcement email, such as expressing excitement, highlighting the benefits of the new software, and inviting customers to try a demo. Assistant 2's answer, however, was slightly more detailed, providing a list of key features and emphasizing the software's adaptability to different business sizes. This additional information makes Assistant 2's response more compelling and informative, earning it a slightly higher score.", "score": [9.0, 10.0]}
+{"review_id": "7ZJhu6mRoFp5Aoy9ChoMB9", "question_id": 75, "answer1_id": "X5BVHT5pFpWxB88SzERrEG", "answer2_id": "YeYDnz2kVYAfsz7vCfgLNS", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1's answer is brief and lacks some important details, such as addressing the customer by their name and providing a subject line for the email. However, it does apologize for the delay and assures the customer that the issue has been resolved. Assistant 2's answer is more comprehensive, providing a subject line, addressing the customer by their name, and giving a more detailed explanation of the issue and the steps taken to resolve it. The tone of Assistant 2's answer is also more empathetic and professional, making it a better overall response.", "score": [8.0, 10.0]}
+{"review_id": "cRyxMmtLbv5ieL9EFtqufh", "question_id": 76, "answer1_id": "Af2SufKwvDBriNXnZX9UoP", "answer2_id": "o2LgSDUwp4b4y4iCteh3RR", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief overview of the history and cultural significance of jazz, touching on its roots and impact on popular culture. However, Assistant 2 went above and beyond by providing a detailed and engaging script for a YouTube video, complete with visuals, voiceover, and a clear structure. Assistant 2's answer was more in line with the user's request for a script and provided a more comprehensive exploration of the topic.", "score": [8.0, 10.0]}
+{"review_id": "Qhh4bdvYP3XoSFtd8u4Dja", "question_id": 77, "answer1_id": "HVnqVdSGRwQfqSsiYxtTTn", "answer2_id": "CBvZQ8RJG6AqhUBSoUDbqU", "reviewer_id": 1, "metadata": {}, "text": "7 10\nAssistant 1 provided a brief overview of their travel blog post but did not actually include the content of the post, making it less helpful and engaging. Assistant 2, on the other hand, provided a detailed and engaging travel blog post, highlighting cultural experiences and must-see attractions in Hawaii. The response was well-structured, informative, and included personal experiences, making it more helpful and relevant to the user's request.", "score": [7.0, 10.0]}
+{"review_id": "VUCVmmcEGU8RRvoRb57RKf", "question_id": 78, "answer1_id": "8hcmmjzyMyhFJ8ZwqJJfvw", "answer2_id": "VfeFmV9H6SEuotYqEf5ri5", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided captivating movie reviews for recently released science fiction films. Assistant 1's review of \"The Future is Blue\" was engaging and touched on the plot, characters, and special effects. However, it lacked specific details about the cast and could have provided more examples of the twists and turns in the plot. Assistant 2's review of \"The Expanse\" was slightly more detailed, mentioning the cast members and providing a more in-depth description of the plot and special effects. Both reviews were helpful and relevant, but Assistant 2's answer was more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "AXiC75TBwktCaPzgdZDzrm", "question_id": 79, "answer1_id": "CU6gGfJqQmwZNxEG2n2Uda", "answer2_id": "ZLYhNQxJXycBZL56RbdGh3", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a general outline of the podcast episode, touching on the main points that would be discussed. However, Assistant 2 went a step further by structuring the podcast script in a more detailed and engaging manner, including opening and closing music, interviews with artists and industry insiders, and a more conversational tone. Assistant 2's answer also demonstrated a better understanding of the podcast format, making it more helpful and relevant to the user's request.", "score": [8.0, 10.0]}
+{"review_id": "e9Pm5Kbsza5whP2TSv5amA", "question_id": 80, "answer1_id": "5SWbCoLV6NRrq5RtWQK4jp", "answer2_id": "kZw2ii8HQtrQp4d2bK5cHy", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided well-written and detailed symphony concert reviews. Assistant 1's answer was engaging and captured the atmosphere of the concert, discussing the balance of classic and modern pieces, the conductor's passion, and the festive atmosphere. However, Assistant 2's answer was slightly more comprehensive, discussing the venue, acoustics, and the mix of audience members, in addition to the orchestra's performance and the conductor's guidance. Both answers were relevant and accurate, but Assistant 2's response provided a more complete picture of the overall concert experience, which is why it received a higher score.", "score": [8.0, 9.0]}
diff --git a/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_bard_vicuna-7b.jsonl b/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_bard_vicuna-7b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..2dc7b5e1831a6337ab8b3fa09b9fc6e23063cb97
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_bard_vicuna-7b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "cS9oZPKAdU6weyzXSeK2Gg", "question_id": 1, "answer1_id": "3oW4JY265ZPJGTYi2CgRYF", "answer2_id": "8haRLA69xGcHc5woRHTj2f", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about improving time management skills. Assistant 1's answer was slightly more detailed, providing a few more specific tips, such as automating tasks and getting organized, which were not mentioned by Assistant 2. Assistant 2's answer was also helpful and relevant, but it was slightly less detailed than Assistant 1's answer. Both answers were well-structured and easy to understand, but Assistant 1's answer provided a bit more value due to the additional tips.", "score": [9.0, 8.5]}
+{"review_id": "NQBSWL9nydxK2qM5W2ETyc", "question_id": 2, "answer1_id": "CtDuDqypbL958TQork7v54", "answer2_id": "j7DV4tZTXhmRFj9zL5TSwq", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective ways to deal with stress. They both covered similar points, such as exercise, relaxation techniques, healthy diet, sleep, and social support. Both answers were detailed and well-organized, making it easy for the reader to understand and apply the suggestions. The only minor difference is that Assistant 2 mentioned time management and avoiding substance abuse, while Assistant 1 mentioned talking to someone and taking a break. However, these differences do not significantly impact the overall quality of the answers, and both assistants deserve a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "hXEGtjtpBuFHqWE2mPdoLD", "question_id": 3, "answer1_id": "jqb5c6eBeyVTFo7SsqxqHA", "answer2_id": "fEq2fYvNcpaENuQFRMFdan", "reviewer_id": 1, "metadata": {}, "text": "8 7\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. However, Assistant 1's answer was more accurate and detailed. Assistant 1 correctly pointed out that Python is dynamically typed, while JavaScript is not statically typed but weakly typed. Assistant 1 also mentioned the use cases for both languages, which is helpful for someone trying to decide which language to learn. Assistant 2's answer had some inaccuracies, such as stating that Python is strongly typed, which is incorrect. Additionally, Assistant 2's answer was less detailed and did not provide as much information about the differences between the two languages.", "score": [8.0, 7.0]}
+{"review_id": "MDk7anRFBK4asRKYGnffjF", "question_id": 4, "answer1_id": "P5rC8W6gTuD4aY6HR5HQj9", "answer2_id": "VYKmdXnESAKV3FRutDUPWU", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer is slightly more detailed, with a clear structure and more specific tips, such as turning off your phone and closing your email to avoid distractions. Assistant 2's answer is also well-structured and informative, but it lacks some of the specific details that Assistant 1 provided. Both answers are useful, but Assistant 1's answer is slightly more comprehensive, which is why it receives a higher score.", "score": [9.0, 8.5]}
+{"review_id": "FNE4gsgmACavD73Vs6XpkK", "question_id": 5, "answer1_id": "3uaqwfbwxtyDdqB8UVN3jM", "answer2_id": "maL9a3rivWyWZk3UgwQTVR", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about the basics of quantum computing. Assistant 1's answer was slightly more detailed, as it provided a clearer explanation of the key concepts of superposition and entanglement, as well as a more structured breakdown of the basics of quantum computing. Assistant 2's answer was also informative and accurate, but it was slightly less detailed in explaining the key concepts and their implications for quantum computing. Overall, both answers were of high quality, but Assistant 1's answer was slightly more comprehensive and well-organized.", "score": [9.0, 8.5]}
+{"review_id": "TgUDEScDBQv9TppP4HiDzF", "question_id": 6, "answer1_id": "HTRJ5q97DrgzoSofxkgedz", "answer2_id": "aGRf8RjpUgneLvw4Uf93do", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, discussing the benefits of both plant-based and animal-based protein sources, as well as providing examples of complete plant-based proteins. Assistant 2's answer also provided useful information, but it was less detailed in terms of the benefits of each protein source and did not mention complete plant-based proteins. Both answers touched on the environmental impact of plant-based protein sources, which is an important aspect to consider. Overall, both assistants performed well, but Assistant 1's answer was slightly more comprehensive.", "score": [9.0, 8.0]}
+{"review_id": "hXcVmymmm8FFuhGrGtNvbE", "question_id": 7, "answer1_id": "EhhyKNc3m8c9tnxm8uPfmt", "answer2_id": "oXtzronC4mdVKH9J59ofij", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question about developing critical thinking skills. Assistant 1's answer included a list of tips and emphasized the importance of being open to new ideas, being curious, and practicing regularly. Assistant 2's answer provided a list of strategies, focusing on active listening, analyzing assumptions, and seeking diverse perspectives. Both answers complement each other and offer valuable advice for improving critical thinking skills.", "score": [9.0, 9.0]}
+{"review_id": "CESscXzRRacPL7nBALUvdb", "question_id": 8, "answer1_id": "JQXPknRQd24NUPKZxi6RNf", "answer2_id": "dE5c99j9hW9qDvjjPxUPzc", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and covered a wide range of challenges faced by the education sector, including funding, class size, teacher shortages, student achievement, school safety, technology, and charter schools. Assistant 2's answer also covered a wide range of challenges, such as access and equity, teacher shortages and quality, technology and innovation, curriculum and standards, financing and funding, student engagement and motivation, teacher and student evaluation, and international comparisons and rankings. Both answers were detailed and informative. However, Assistant 2's answer was slightly more comprehensive, covering a broader range of challenges and providing a more global perspective. This is why Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "28nWYkTULhohcFMG2W9wYL", "question_id": 9, "answer1_id": "Lb3C2xQKdLCqFj4v3rmaof", "answer2_id": "oLRzkYUv8ooSJJLqfPnrxd", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and focused on five primary factors influencing consumer behavior. However, Assistant 2's answer provided a more comprehensive list of factors, including technological, environmental, marketing, health, legal, and public opinion factors, which added more depth to the response. Therefore, Assistant 2 receives a slightly higher score due to the additional factors and level of detail provided.", "score": [8.0, 9.0]}
+{"review_id": "ZuwEWPHTWPAebyqr7kWxTt", "question_id": 10, "answer1_id": "DhuZJtL3jhnrsTBvDct9oV", "answer2_id": "hi7Gu2XPwcThie58TvvkK8", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question about effective strategies for conflict resolution in the workplace. Assistant 1's answer was well-structured and provided a clear list of strategies, while Assistant 2's answer also provided a comprehensive list of strategies with some additional points, such as addressing power imbalances and providing training. Both answers complement each other and offer valuable information for resolving conflicts in the workplace.", "score": [9.0, 9.0]}
+{"review_id": "JBCJVYkfra5kPAydGvcWkc", "question_id": 11, "answer1_id": "mDSj4BKim2eANUnEjW7xBm", "answer2_id": "Xx5PB6u9sBagzxtB2YUKq8", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and covered the environmental and health implications of using single-use plastic bottles, as well as the benefits of using reusable bottles. However, Assistant 2's answer was slightly more detailed, as it also included the economic and social implications of using single-use plastic bottles versus reusable bottles. This additional information made Assistant 2's answer more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "mfCquZDDeSkd3b4RC2iZmS", "question_id": 12, "answer1_id": "MnkceSK7WwyXqAhbuKVYX7", "answer2_id": "FfaUTMS95MuGQQRDefvVzj", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and covered important factors such as accessibility, affordability, convenience, safety, and sustainability. Assistant 2's answer also covered important factors, including physical accessibility, communication accessibility, route and schedule accessibility, service animals and assistive devices, dissemination of information, training and education, and continuous improvement. Both answers provided a good level of detail. However, Assistant 1's answer was slightly more comprehensive, as it touched upon the integration with other transportation options and the importance of renewable energy sources, which were not mentioned by Assistant 2.", "score": [9.0, 8.5]}
+{"review_id": "bYezWcuirLAHyiLFg7jQ3C", "question_id": 13, "answer1_id": "EsyaBVpTN8BGbTSiFMnZUF", "answer2_id": "WgCpMqMPUb9TU8jCuiExg3", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed and organized, explaining the differences between fiscal and monetary policies and discussing the debate about which policy should be the primary tool to combat recessions. Assistant 2's answer provided a list of ways governments can use fiscal and monetary policies, but it was less organized and did not delve into the debate about the primary tool. Both answers were informative, but Assistant 1's answer was more comprehensive and well-structured, which is why it receives a higher score.", "score": [9.0, 8.0]}
+{"review_id": "dw3GWdEbfZZnstzFg93SXf", "question_id": 14, "answer1_id": "dX8M752A6tzqLg9KhwgG5p", "answer2_id": "ATkPcXKbAki2VCoopjq6c3", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and provided a clear overview of the effects of language and cultural barriers on communication and relationships in multicultural societies. The answer also provided suggestions for overcoming these barriers. Assistant 2's answer was more detailed and provided specific examples of how language and cultural barriers can affect communication and relationships. The answer also touched on additional factors such as stereotypes, prejudice, discrimination, and power dynamics. While both answers were informative, Assistant 2's answer was slightly more comprehensive and detailed, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "g6ZYXqGHznd7wjnhmtPGac", "question_id": 15, "answer1_id": "dzwhq5XbaEBVpargyczz9B", "answer2_id": "TFh5bXFdG4fdK5hmq6qS6o", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question. They both covered various aspects of how AI can be used to improve the quality and efficiency of healthcare delivery, such as diagnosing diseases, treating diseases, monitoring patients, and providing administrative support. Both answers also emphasized the potential benefits of AI in healthcare, such as better patient outcomes and freeing up time for healthcare professionals. The only minor difference is that Assistant 2 mentioned the importance of patient privacy and security, which is a relevant consideration when implementing AI in healthcare. Overall, both assistants performed exceptionally well in addressing the question.", "score": [9.0, 9.0]}
+{"review_id": "JUnEHEYEG97Bq9CgoGiDMF", "question_id": 16, "answer1_id": "8zqxUtHxgtoHBkbf2bkqNW", "answer2_id": "XDV7jFB36qKGzPXPcwvbQy", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question. They both explained the process of gene editing using CRISPR-Cas9 technology, discussed potential applications, and addressed ethical implications. The answers were well-structured and covered the main aspects of the topic. It is difficult to differentiate the quality of the answers, as both assistants performed at a high level, providing comprehensive information on the subject.", "score": [9.0, 9.0]}
+{"review_id": "5LzRo8gXYJiyqmqPNxmMiL", "question_id": 17, "answer1_id": "WJc37t4n5PqmKKS3V4eMG2", "answer2_id": "6E3YAfxqckwL83dVo6ZRP4", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question. They both explained how vaccines work to protect individuals and communities from infectious diseases and the concept of herd immunity. Assistant 1 provided specific examples of vaccine effectiveness, while Assistant 2 discussed the concept of herd immunity thresholds. Both answers complement each other and provide a comprehensive understanding of the topic.", "score": [9.0, 9.0]}
+{"review_id": "X7uev9PKsFmZLcSF5DKTXc", "question_id": 18, "answer1_id": "CvVLf8FgoHywJy8j8JJ4qL", "answer2_id": "FjSXpLx6FfHU8zN9mb8ucX", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and covered the main points, including the influence of social media on news consumption, the role of algorithms, the spread of misinformation, and potential solutions. However, Assistant 2's answer was slightly more detailed and provided additional context, such as the positive aspects of social media in news consumption, the consequences of misinformation, and the measures taken by social media platforms to combat misinformation. This additional information made Assistant 2's answer more comprehensive and informative, resulting in a higher score.", "score": [8.0, 9.0]}
+{"review_id": "LhfFjF4Uk2jFHTByQL7vn9", "question_id": 19, "answer1_id": "P5rytR6vTJjxgWxRoxT3vX", "answer2_id": "HJczP2JdM4s6cxZyBWVyNZ", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed and organized, providing clear examples for each factor and offering specific suggestions for promoting healthier diets. Assistant 2's answer was also informative and relevant, but it was less detailed and did not provide as many specific examples or suggestions. Both answers were helpful and accurate, but Assistant 1's answer was more comprehensive and well-structured, which is why it receives a slightly higher score.", "score": [9.0, 8.0]}
+{"review_id": "5C6GFLbfPPZefKbiCN82hq", "question_id": 20, "answer1_id": "5biCd7QRZP6rquaz8eC9Vm", "answer2_id": "X2EFE34dc5vtFSTGr3n2Bg", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question about the process of natural selection and its contribution to the evolution and adaptation of species. Both answers covered the key principles of natural selection, such as variation, reproduction, survival, and adaptation. They also provided clear examples to illustrate the concepts. The answers were well-structured and easy to understand, making it difficult to differentiate between the two in terms of quality. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "QCE2m44e9bSJxDyWhmWnSt", "question_id": 21, "answer1_id": "363RwB6kr8nV6qFNdjXZnS", "answer2_id": "iJjKWygtpHaLJirgK2PACK", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more detailed and relevant response, including a sample introduction and mentioning the importance of avoiding boastfulness and adhering to customs and etiquette. Assistant 2's response was also relevant and accurate, but it lacked the level of detail and the sample introduction that Assistant 1 provided. Both assistants addressed the importance of being respectful and mindful of appearance, but Assistant 1's response was more comprehensive overall.", "score": [8.0, 7.0]}
+{"review_id": "ENQReuehYhofBjrkow6z5c", "question_id": 22, "answer1_id": "gDnYxMu5Dd52xhMqQAJaZP", "answer2_id": "FRZfkefyPRGCSpLEexQoRH", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, offering different approaches to motivate the crew, such as appealing to their sense of adventure, sharing stories of other pirates, and offering them a share of the treasure. Assistant 2's answer was also good, but it was a bit shorter and focused more on the excitement of finding the treasure and becoming famous pirates. Both answers were well-written and would be effective in motivating a pirate crew, but Assistant 1's answer provided a more comprehensive approach.", "score": [9.0, 8.5]}
+{"review_id": "4qk5ZSNAtYRAMKJaQEnYiz", "question_id": 23, "answer1_id": "kCV5RSrnmmTyv3HA5oU38P", "answer2_id": "HBah6W9KuR8eNpRQJUxVvd", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the user's question. They both used poetic language and imagery to express their love in a soliloquy, which is consistent with the style of Shakespearean characters. Both assistants also included a declaration of love, ensuring that the beloved would know how they feel. The level of detail in both responses is sufficient, as they both provided examples of soliloquies that effectively convey the depth of their emotions. It is difficult to determine a clear winner between the two, as both responses are of high quality and meet the user's request. Therefore, both Assistant 1 and Assistant 2 receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "cD23sCsJjah9n7Yeibfu6F", "question_id": 24, "answer1_id": "CTGLYNAWrWeuCYZAR9bD2r", "answer2_id": "3Rgw9vMLyMiwazfdjhWcgT", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and engaging answers to the question. Assistant 1's answer was more relatable, as it started with the superhero being a regular person and then gaining powers through an accident. This approach might be more appealing to a child. However, Assistant 2's answer was more detailed and provided a richer backstory, including the superhero's creation in a laboratory and the specific powers they gained. Both answers emphasized the importance of using powers for good and making a difference in the world, which is a valuable message for a child. Assistant 2's answer was slightly better due to the additional details and the unique origin story, which may capture a child's imagination more effectively.", "score": [8.0, 9.0]}
+{"review_id": "SNSmZMreC3afRBzX8qc5nH", "question_id": 25, "answer1_id": "Eh5qCENdftYf9JKrBspYna", "answer2_id": "b4oghpgzhWMdoryzQrSwuF", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. However, Assistant 2's answer was slightly better in terms of the level of detail and the organization of the response. Assistant 1's answer included a few technologies that are already in existence or in development, such as AI, robotics, and virtual reality, which made the answer less focused on the year 3000. Assistant 2's answer, on the other hand, provided a more comprehensive list of advancements and was better organized by numbering the points. Both answers were informative, but Assistant 2's response was more concise and better structured.", "score": [8.0, 9.0]}
+{"review_id": "NMsbDd3dC9zrkEVpDLvFEe", "question_id": 26, "answer1_id": "M6hCe8Z4JDpt75HZ5PsSWr", "answer2_id": "GzzDrjBAe3BnXWgWrATxJL", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate descriptions of a winning play in the final seconds of a championship game. Assistant 1 focused on a football scenario, while Assistant 2 described a basketball situation. Both answers were detailed and painted a vivid picture of the events. However, Assistant 2's response was slightly more engaging and captured the excitement and emotions of the moment better, which is essential for a sports commentator. Therefore, Assistant 2 receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "T8Ei8o3kbSq7foTsDL2pJR", "question_id": 27, "answer1_id": "QCDsmbALfut5758pyiKvc5", "answer2_id": "f7hUYhajUbXNs3gQrG9z3b", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 described a specific dish with its ingredients and preparation, while Assistant 2 focused on the fusion of flavors and the use of locally-sourced, seasonal ingredients. Assistant 2's answer was slightly more detailed in terms of the balance of flavors and the presentation of the dish, which is why it received a higher score. Both answers were well-written and addressed the question effectively, but Assistant 2's response provided a more comprehensive and sophisticated description of the signature dish.", "score": [8.0, 9.0]}
+{"review_id": "h4TCP8JzvzFkcWEuMEdjbF", "question_id": 28, "answer1_id": "NWUbhwZQCuXsuQimrjQRza", "answer2_id": "LpUrrJuQ4cA6LtNRmiTfRv", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed answers to the question. Assistant 1's answer was more focused on the emotions and the view from the top, providing a vivid and engaging description of the experience. Assistant 2's answer started by clarifying that as an AI, it doesn't have emotions but proceeded to provide a detailed description of the view and the emotions a person might experience. Assistant 2 also mentioned the dangers and risks associated with the climb, which added more depth to the answer. Both answers were helpful and accurate, but Assistant 2's answer was slightly more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "3QjYACqbYLfhEyZqX8sFpa", "question_id": 29, "answer1_id": "VYwSjZrSLW9ZSvqryyjEaB", "answer2_id": "Yiup49xrP6jf9nsEuGHgdN", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, with a more personal touch, describing a day in the life of a space colonist on Mars and listing specific challenges faced. Assistant 2's answer was also informative and accurate, but it was more focused on listing the challenges without providing a detailed description of daily life. Both answers were of high quality, but Assistant 1's response was slightly more engaging and comprehensive.", "score": [9.0, 8.0]}
+{"review_id": "6gvG3SpNofHvaKKVpGR8pf", "question_id": 30, "answer1_id": "FA7PXuUbEVGKHaWpxaimy8", "answer2_id": "7cWm5Kop6bLzwLgJjpAVrK", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and detailed answers to the user's question. Assistant 1's answer focused more on the relationships with other survivors and the skills they have learned from them, while Assistant 2's answer emphasized the importance of adaptability, resourcefulness, and the natural world as an ally. Assistant 2's answer was slightly more comprehensive, as it touched upon the character's own skills and knowledge, as well as the potential dangers and challenges posed by the new environment. This is why Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "hNUiGZQiZe8R6kQPbkvtuC", "question_id": 31, "answer1_id": "j5EV5cZNsn9DcF6WsvXRzS", "answer2_id": "YaUHhigGUvgv82Js3ktFgs", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, offering more specific ways to determine if a restaurant is popular among locals or tourists, such as checking the restaurant's website or social media pages, looking for the restaurant's name in local publications or websites, and observing the restaurant's atmosphere. Assistant 2's answer was also helpful and relevant, but it provided fewer specific methods for determining the restaurant's popularity among locals or tourists. Both assistants explained why this information might be useful, with Assistant 1 focusing on finding authentic local cuisine or convenient tourist locations, and Assistant 2 emphasizing the importance of experiencing local culture and avoiding overpriced tourist-oriented restaurants. Overall, both answers were informative and helpful, but Assistant 1's answer was slightly more detailed and comprehensive.", "score": [9.0, 8.0]}
+{"review_id": "Wm8FyWUbSjUJgntbcBmThM", "question_id": 32, "answer1_id": "2eAYCYmwTkPa3ejQDv8LyB", "answer2_id": "LaHQYWhmXF7mnPSVFdhCeq", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both listed several subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed. Both answers provided a good level of detail, with Assistant 1 focusing more on behavioral clues and Assistant 2 focusing on language and conversation patterns. Both answers also emphasized the importance of approaching conversations with an open mind and clarifying any unclear points. Overall, both assistants performed well and deserve a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "hqNbwKZhTqrCM8VfG2M6g9", "question_id": 33, "answer1_id": "d562WYnhsvgJ8J6Ubitmvw", "answer2_id": "Br2uFCYmRUaQULwKzpffz9", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was more detailed and organized, covering a wider range of reasons and specific situations where using a paper map or asking for directions might be the best option. Assistant 2's answer was also good, but it did not cover as many reasons or situations as Assistant 1's answer. Both answers were well-written and informative, but Assistant 1's answer was slightly more comprehensive, which is why it received a higher score.", "score": [9.0, 8.0]}
+{"review_id": "hdP7fpPmXCa2h3cU74rX9c", "question_id": 34, "answer1_id": "hPMvV6zL2C4qTP4mRmhJwG", "answer2_id": "FCRqJu6DgRvCNq4Z2NneHf", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both covered similar points, such as paying attention to body language, active listening, and engagement in the conversation. Both answers also emphasized that there is no definitive way to determine a person's level of interest, but by observing their behavior, one can get a good sense of their engagement. The level of detail in both answers is sufficient to provide a clear understanding of the topic. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "fEcZ9HB3qyoE4MBJSkYtXF", "question_id": 35, "answer1_id": "npWNeKceGyqCYaRpY4w54g", "answer2_id": "Fy5Nw8LcWpdq2GokTbiwuq", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question. They both listed multiple reasons why someone might prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher. The reasons provided by both assistants were well-explained and covered a wide range of factors, such as supporting the local economy, unique products, better service, and sustainability. Both answers were comprehensive and informative, making it difficult to differentiate between the two in terms of quality. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "ECix4qvPj6wmyHyseBbeBo", "question_id": 36, "answer1_id": "WVuaK9m8Sedcws27tNu7Ev", "answer2_id": "hKhcnEtSjzysU7sbtE3JeH", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more concise and focused on the key points, while Assistant 2's answer provided additional details and elaborated on some aspects. Both answers were useful, but Assistant 1's response was more precise and easier to follow, which is why it received a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "8RhowAc4E3h2Zs2esaLDNN", "question_id": 37, "answer1_id": "HLtTf83Y5QRP4TxX6nw5TC", "answer2_id": "cAVZTw5QY8WUnJEd3rUu3p", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, covering the release of endorphins, adrenaline, and the sense of control over fear as reasons for enjoying being scared. Assistant 2's answer also mentioned the thrill, excitement, and endorphins, but focused more on individual personality traits, past experiences, and coping mechanisms. Both answers acknowledged that the enjoyment of being scared is a personal preference and can be influenced by various factors. Assistant 1's answer was slightly more comprehensive, which is why it received a higher score.", "score": [9.0, 8.5]}
+{"review_id": "nG3oS8DnxwnRLFqhCkVqYv", "question_id": 38, "answer1_id": "Fmdtexq6QQNuoqZkZfDURY", "answer2_id": "9JxYGUzSa2rR68BTVuZJEA", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, as it included examples for each point and also mentioned the importance of talking to people from the culture to gain more insights. Assistant 2's answer also provided relevant points and examples, but it was slightly less detailed and did not mention the importance of engaging with people from the culture. Overall, both answers were informative and useful, but Assistant 1's answer was slightly more comprehensive.", "score": [9.0, 8.5]}
+{"review_id": "aKa3rkrAqMC4d9sFQp2rmq", "question_id": 39, "answer1_id": "WxnC69jTMkyJvcqvMCgCwY", "answer2_id": "5b3qKj2jtzgcZX7SSipQBW", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both discussed the potential benefits and costs of space exploration and acknowledged the complexity of the issue. Assistant 1 provided a more detailed list of potential benefits and costs, while Assistant 2 focused more on the moral perspective and the importance of individual values and priorities. Assistant 2's answer was slightly more comprehensive, as it touched upon the political, economic, and cultural factors that may influence the decision, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "mjLcArrhCkRSnAyxG3FuAx", "question_id": 40, "answer1_id": "npZdTFPRqZfoqzt5YurYEL", "answer2_id": "hG6i3Q8F7bA7u73vo6t2XZ", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's response was slightly more detailed, discussing specific ways to mitigate the negative effects of automation on employment, such as investing in education and training, providing financial assistance, encouraging entrepreneurship, and investing in research and development. Assistant 2's answer was also relevant and accurate, but it provided fewer specific examples and solutions. Both answers emphasized the importance of balancing job creation and technological progress, but Assistant 1's response was more comprehensive and provided a clearer roadmap for achieving that balance.", "score": [8.0, 7.5]}
+{"review_id": "23C2uXKactKxbuHP5akf6p", "question_id": 41, "answer1_id": "iy9aa5sqFeNA2uPQZLpxEz", "answer2_id": "KaZUZ4NAnQ3SY6mq2QY3Kt", "reviewer_id": 1, "metadata": {}, "text": "7 5\nAssistant 1 provided a more detailed and structured response, explaining the reasoning behind the calculations and considering factors such as age and activity level. However, the final number provided (518,017,359 blinks) seems to be incorrect based on the calculations provided. Assistant 2's response was less structured and contained some inconsistencies in the calculations, leading to a less accurate and less helpful answer. Both assistants could have improved their answers by double-checking their calculations and providing clearer explanations.", "score": [7.0, 5.0]}
+{"review_id": "7yeLfxi9cHsTpFaFnjCzFb", "question_id": 42, "answer1_id": "XAALo4GKWE3QNb7wbkWVNk", "answer2_id": "Jwrdb34BnrswtwbeSd6xxP", "reviewer_id": 1, "metadata": {}, "text": "8 4\nAssistant 1 provided a more accurate and detailed response to the question, explaining the calculation of the number of atoms in a grain of salt step-by-step. The answer was relevant and precise, making it easier for the reader to understand the reasoning. Assistant 2, on the other hand, provided an answer that focused on the weight of a grain of salt rather than the number of atoms, which was not directly related to the question. The response was less relevant and less accurate, resulting in a lower score.", "score": [8.0, 4.0]}
+{"review_id": "QkKnECTGkoea37Bryh9add", "question_id": 43, "answer1_id": "XRWjLnJNyGNMfktTvWpbRV", "answer2_id": "Ubz2V2EhkWmNYPEErdX8gn", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a clear answer with an estimated number of 8.6 million lightning strikes per day, but the explanation was not as detailed as Assistant 2's. Assistant 2 gave a more comprehensive explanation, discussing the factors that affect the number of lightning strikes, the methods used to determine the number, and the importance of lightning in the Earth's ecosystem. Assistant 2's answer was more informative and provided a better understanding of the topic, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "LSvAFkMHYaCmPmW4bSE4Ef", "question_id": 44, "answer1_id": "CHeyn9eR3u5eFq99UJ3pbB", "answer2_id": "CufihrnQT3GRic3Vr2rfU2", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a rough estimate of the number of balloons needed to lift the house, but the explanation lacked a clear step-by-step calculation and made some assumptions without proper justification. Assistant 2, on the other hand, provided a more detailed and step-by-step calculation, taking into account the volume of the house, buoyancy force, and the surface area of the balloons. Although both answers concluded that lifting a house with balloons is impractical, Assistant 2's response was more detailed, accurate, and helpful in understanding the reasoning behind the estimation.", "score": [7.0, 9.0]}
+{"review_id": "HXZXyMDsuwsCAHx8L6bQRC", "question_id": 45, "answer1_id": "kfoNRLCHFucfWcxFBPoJgP", "answer2_id": "fBBFkmgfGSiPtQtBdsVxpZ", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more specific and accurate number of text messages sent per minute (15,220,700), which was not provided by Assistant 2. Both assistants explained the reasons behind the high number of text messages sent, but Assistant 1's answer was more structured and easier to follow. Assistant 2's answer was still relevant and informative, but it lacked the precision of Assistant 1's response.", "score": [8.0, 7.0]}
+{"review_id": "67BTxZHGHUhMGMjxXUaQaV", "question_id": 46, "answer1_id": "A4sDEGXpWaNVA39uRsDNjB", "answer2_id": "eQmVTyJQmoBKrZ9J38EXBC", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 provided a detailed explanation of the factors that can affect the number of words spoken per day, such as occupation, culture, and personality. They also provided an estimate of 7,000 words per day for the average person. However, Assistant 1 did not consider the total number of people on Earth or the number of languages spoken, which would have made their answer more complete.\n\nAssistant 2, on the other hand, provided a more comprehensive answer by considering the number of languages spoken on Earth and the total population. They also provided a higher estimate of 16,000 words per day for the average person, which is closer to the commonly cited figure. Assistant 2 then used these estimates to calculate a rough total of 1.125 trillion words spoken daily on Earth. While they acknowledged that this number may not be entirely accurate, their answer was more detailed and provided a better overall estimate.\n\nBoth assistants provided useful information, but Assistant 2's answer was more complete and detailed, which is why they received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "dFFSdp8jCXHkRt7McwFq9b", "question_id": 47, "answer1_id": "hUAc6BCs5NuY7tS62PPXbT", "answer2_id": "4JYtxzTN9oUTkVrFy7CDe6", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided an interesting fact about the estimated number of snowflakes that fall each year, but the answer did not focus on a typical winter and did not explain the reasoning behind the estimation. The answer also discussed the formation and shape of snowflakes, which is informative but not directly related to the question. Assistant 2, on the other hand, provided a more detailed and relevant explanation of the factors that affect the number of snowflakes falling during a typical winter. Although Assistant 2 did not provide a specific number, the answer acknowledged the difficulty in estimating the number of snowflakes and provided a general range for the number of snowflakes falling per square meter per hour. This answer was more helpful, relevant, and accurate in addressing the question.", "score": [7.0, 9.0]}
+{"review_id": "FkuGsGTabMAWK334iiu2c3", "question_id": 48, "answer1_id": "TMtjmb5tDizQ9RETRxpt2s", "answer2_id": "M7KhQtwQrueKKwuemeJHUb", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and comprehensive answer, taking into account various factors such as the number of books indexed by Google Books, the average length of a book, and the possibility of lost or destroyed books. The answer also provided a range of estimates for the total number of pages. Assistant 2's answer focused more on the number of books published each year in the United States and the average length of a book, but did not consider other factors such as lost or destroyed books, self-published books, or books from other countries. While both answers acknowledged the impossibility of determining an exact number, Assistant 1's response was more thorough and informative.", "score": [8.0, 6.0]}
+{"review_id": "M5whJvhEhACGhJgGK8GrFU", "question_id": 49, "answer1_id": "AwQMPhhaJ32ByA3VjKF5Ph", "answer2_id": "AuuvYW4edaZy5wpL5QyppA", "reviewer_id": 1, "metadata": {}, "text": "6 4\nAssistant 1 provided a more detailed response, explaining the age of the Earth and the time it takes for the Earth to orbit the Sun. However, the answer is not accurate, as it states that the Earth has orbited the Sun 4.543 billion times since the beginning of life, which is actually the age of the Earth, not the time since the beginning of life. Assistant 2's answer is also not accurate, as it provides an estimate of 200,000 orbits, which is far too low. Additionally, Assistant 2's explanation is confusing and contains incorrect information about the Earth's distance from the Sun and the length of a solar orbit. Both answers could be improved in terms of accuracy and clarity.", "score": [6.0, 4.0]}
+{"review_id": "FwLF2GAZqkoMkKPdYTqbgF", "question_id": 50, "answer1_id": "UCqxXfMAnqJKbApsjrPFcq", "answer2_id": "TJpkA2j3kC9akcHtmVQKUF", "reviewer_id": 1, "metadata": {}, "text": "7 5\nAssistant 1 provided a more detailed and relevant answer, mentioning the number of songs registered with the US Copyright Office and the number of songs available on streaming services like Spotify and Apple Music. This approach gives a better understanding of the scale of recorded songs. However, the answer could still be more precise and provide more context about the history of recorded music. Assistant 2's answer was less detailed and less relevant, focusing on the number of songs in existence rather than the number of recorded songs. The answer also provided a lower estimate of the number of songs, which seems less accurate considering the information provided by Assistant 1.", "score": [7.0, 5.0]}
+{"review_id": "HWdeM7fxyFLuydw2TNobxi", "question_id": 51, "answer1_id": "YMRg5Xi9BBvvqqny2tqJZ3", "answer2_id": "9FJsZWNGMNuGKGF4YjQoRM", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and relevant answer, discussing the potential impact of the Internet on the Renaissance period in terms of intellectual and cultural growth, the arts, and the spread of education and knowledge. Assistant 2, on the other hand, focused more on the limitations of the technology during that time and speculated on how the Internet might have been used, but did not provide as much insight into the potential impact on society and culture. Both answers were accurate and relevant, but Assistant 1's response was more helpful and comprehensive.", "score": [8.0, 6.0]}
+{"review_id": "YSA2HvJYVTTgQw2EBhojuZ", "question_id": 52, "answer1_id": "fhr7iMPb9SVJ663mXKBuct", "answer2_id": "4vN4PKLUJkaYTEEVzYE3DN", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more concise and focused on the potential outcomes of the Aztecs repelling the Spanish, while also mentioning the Aztecs' vulnerability to disease and internal issues. Assistant 2's answer provided similar information but included more speculation on the potential impact of European ideas on Aztec society. Both answers acknowledged the speculative nature of the question and the profound impact of the Spanish conquest on the Aztec civilization. Assistant 1's answer was slightly more precise, which is why it receives a slightly higher score.", "score": [8.0, 7.5]}
+{"review_id": "E9eY5wnWC7o3p73YutJAYD", "question_id": 53, "answer1_id": "4rrmyZw9zhyJGqdrcazFvt", "answer2_id": "LjnTtmsFEMgFEkYMKE3VRV", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the impact of the Black Death on the population, economy, and society, while Assistant 2 explored possible scenarios if the Black Death had not occurred, touching on economic growth, political changes, cultural flourishing, global trade, and environmental impact. Assistant 2's answer was more detailed and provided a broader range of potential outcomes, which is why it received a slightly higher score. Both answers acknowledged the uncertainty of predicting an alternate history.", "score": [8.0, 9.0]}
+{"review_id": "hEbaQDMqDfJBpmDSsV3dGB", "question_id": 54, "answer1_id": "Za3mY9xwxpZdPmGW48wtzu", "answer2_id": "9L9SgmcPjkFs3CwuiUJqm9", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth assistants provided relevant and helpful answers to the question. Assistant 1 explored the possibilities of Newton's potential contributions to biology and acknowledged the uncertainty of the outcome. Assistant 2 also acknowledged the uncertainty but went further in discussing the potential impact on various fields such as medicine, genetics, and ecology. Assistant 2's answer was more detailed and provided a broader perspective on the potential consequences of Newton focusing on biology, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "3cmHJkUHy2FbRbH4gbsFo6", "question_id": 55, "answer1_id": "cbAaJS9ULjR4XYSHATujSG", "answer2_id": "f9pwVc7bbzajLXKsaGsXQ8", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more comprehensive answer, discussing the Beatles' impact on popular culture, music, fashion, and their message of peace and love. The answer also acknowledged the uncertainty of the hypothetical scenario. Assistant 2's answer, while still relevant, focused more on listing possibilities without going into much detail about the cultural and social impact of the Beatles. Both answers were accurate and relevant, but Assistant 1's response was more detailed and well-rounded.", "score": [8.0, 7.0]}
+{"review_id": "5mY25nyxfeo89P9npEpmo4", "question_id": 56, "answer1_id": "ZEgb9fvopGo7HF5wPeoeHs", "answer2_id": "UfZJcVaZLWkVsipvmDBrdd", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1's answer was slightly more detailed, discussing the importance of Turing's work at Bletchley Park and how it helped the Allies make better decisions about troop deployment and resource allocation. Assistant 2's answer, while still accurate and relevant, focused more on the potential challenges the Allies would have faced without the decryption capabilities provided by Turing's work. Both answers acknowledged the difficulty in predicting the exact outcome if Turing had not cracked the Enigma code, but Assistant 1's answer provided a more comprehensive explanation of the impact of Turing's work on the war.", "score": [8.0, 7.5]}
+{"review_id": "AyoFuvXLJn5LUyXgf3UvXZ", "question_id": 57, "answer1_id": "igMXoEiszFM65ZS2KUTvtm", "answer2_id": "K635PkbTDkJTEnDSG68bTh", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more detailed answer, discussing the impact on the global economy, Egypt's income, and Egyptian pride. Assistant 2 also provided a relevant answer, but it was less detailed and focused more on the impact on trade routes and the global economy. Both answers were accurate and relevant, but Assistant 1's response was more comprehensive and provided a better understanding of the potential consequences of the Suez Canal not being constructed.", "score": [8.0, 7.0]}
+{"review_id": "QFzWxhxXprdvcZMxgXG7fc", "question_id": 58, "answer1_id": "Up4h8RpgVVafBtUj4tiGPZ", "answer2_id": "aGEwGV2TQY6snjFdiLkyqZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and provided a good overview of the potential outcomes if the Maya civilization had not collapsed. However, Assistant 2's answer was slightly more detailed, providing more context about the Maya civilization, including its geographical location and specific achievements. Additionally, Assistant 2 mentioned that the collapse of the Maya civilization is still an area of active research, which adds credibility to the answer. Overall, both assistants performed well, but Assistant 2's answer was slightly more informative and comprehensive.", "score": [8.0, 9.0]}
+{"review_id": "FkuHNhgFZdHA6YMsHLbFh2", "question_id": 59, "answer1_id": "CMg3ypKyykGUzQsDmX7i35", "answer2_id": "ksnQVXY9SY2NzMxXEmV3Pd", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question. Assistant 1's answer was slightly more detailed, discussing the potential for Chinese or Japanese exploration and the importance of the timing of Columbus's voyage. Assistant 2's answer, while also accurate and relevant, was slightly less detailed and focused more on the potential changes in global power structures and the religious landscape. Both answers acknowledged the impossibility of knowing for certain what would have happened if Columbus had not discovered the Americas, but Assistant 1's answer was slightly more comprehensive and detailed, earning it a higher score.", "score": [8.0, 7.5]}
+{"review_id": "GrnV2o4nbm7LgqDLRS8sdC", "question_id": 60, "answer1_id": "WhLiJXznzRBkzxDdnc5ndX", "answer2_id": "GZpcvXQyrKUHsgWpzS7Hpo", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer focused more on the potential impact of Van Gogh's success on his personal life and the appeal of his art, while Assistant 2's answer focused on the potential impact on his career and the art world. Assistant 2's answer was slightly more detailed, mentioning specific influences like Paul Gauguin and Henri Matisse, and discussing the modernist movement. Both answers acknowledged the impossibility of knowing for sure what would have happened, but Assistant 2's answer provided a more comprehensive view of Van Gogh's potential impact on the art world, which is why it received a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "ZvVLSUxunTPfXPrMdzyY8P", "question_id": 61, "answer1_id": "Vzmhr5RMndkR866p4pUef6", "answer2_id": "Czx3sfkw8C4md9YJrhs3gu", "reviewer_id": 2, "metadata": {}, "text": "4 8\n\nAssistant 1's submission did not provide any code or solution to the given problem, which is why the score is low. The response was not helpful in solving the problem.\n\nAssistant 2's submission provided a working C++ program that reads a text file line by line and counts the number of occurrences of a specific word in the file. The code is structured well, and the comments are clear and concise. However, there are a few issues with the code:\n\n1. The program does not ask the user for the word to search for. The `word` variable should be initialized with a user-provided value.\n2. The program reads the first line of the file before entering the loop, which means it will not process the first line. The `getline` function should be called only inside the loop.\n3. The program does not handle cases where the word appears multiple times in a single line or when the word is a substring of another word.\n\nTo improve the code, consider the following suggestions:\n\n1. Prompt the user to enter the word to search for and store it in the `word` variable.\n2. Remove the initial `getline` call before the loop and only use it inside the loop.\n3. Use a more robust method to count the occurrences of the word in each line, such as tokenizing the line and comparing each token to the word. This will handle cases where the word appears multiple times in a single line or when the word is a substring of another word.", "score": [4.0, 8.0]}
+{"review_id": "gTjrVcNdVJu5bkNCdGpjsi", "question_id": 62, "answer1_id": "jPrfhv9oaZGrkXxDUw5Vbt", "answer2_id": "G7QjnFhVnsMPMN9UES8mTV", "reviewer_id": 2, "metadata": {}, "text": "7 4\n\nAssistant 1's code is a correct implementation of the longest common subsequence problem using dynamic programming. The code is well-structured, easy to read, and contains helpful comments. The time and space complexity are also mentioned, which is a good practice. However, there is room for improvement in terms of comments, as they could be more descriptive and explain the logic behind the dynamic programming approach.\n\nAssistant 2's code has a few issues. First, the implementation is incorrect, as it does not properly update the `dp` table when the characters are not equal. The use of the `break` statement is incorrect in this context, as it should be updating the `dp` table with the maximum value between `dp[i-1][j]` and `dp[i][j-1]`. Additionally, the example provided is incorrect, as the output should be the longest common subsequence itself, not its length. The code also lacks comments, making it harder to understand the logic behind the implementation.\n\nTo improve Assistant 2's code, the following changes should be made:\n1. Replace the `break` statement with `dp[i][j] = max(dp[i-1][j], dp[i][j-1])`.\n2. Modify the function to return the actual longest common subsequence, not just its length.\n3. Add comments to explain the logic and functionality of the code.", "score": [7.0, 4.0]}
+{"review_id": "6hsRpA8hWWBp5iRKCRPqaB", "question_id": 63, "answer1_id": "KdZNrf6udTfCxGGcQxRbXk", "answer2_id": "RHqehPQpeXkN9d6NHkVGxU", "reviewer_id": 2, "metadata": {}, "text": "7 8\n\nAssistant 1:\nStrengths:\n- The code is simple and easy to understand.\n- The regular expression provided covers most of the basic email address formats.\n\nWeaknesses:\n- The regular expression contains unnecessary spaces, which may lead to incorrect validation results.\n- The code does not include comments explaining the regular expression components.\n\nSuggestions for improvement:\n- Remove the unnecessary spaces in the regular expression.\n- Add comments to explain the different parts of the regular expression.\n\nAssistant 2:\nStrengths:\n- The code is well-structured and easy to understand.\n- The regular expression provided is more accurate and efficient than Assistant 1's.\n- The code includes comments explaining the regular expression components.\n\nWeaknesses:\n- The code does not include a function to validate email addresses, which would make it more reusable.\n\nSuggestions for improvement:\n- Encapsulate the email validation logic in a function to make it more reusable.", "score": [7.0, 8.0]}
+{"review_id": "kFmf3aRBy9pdLWZ8PPRkJx", "question_id": 64, "answer1_id": "AFzqAfbYVL25DBqFA2cv4G", "answer2_id": "aqYsaowQtr5rYU6iDbRj5x", "reviewer_id": 2, "metadata": {}, "text": "8 7\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to understand.\n- The use of a table (dp) to store the Fibonacci numbers is an efficient approach.\n- The comments are helpful and explain the logic of the code.\n\nWeaknesses:\n- The code does not handle the case when n is 1 or 0, which may cause an IndexError.\n- The function name \"fib\" could be more descriptive.\n\nSuggestions for improvement:\n- Add a condition to handle the cases when n is 1 or 0.\n- Consider renaming the function to \"fibonacci\" for better readability.\n\nAssistant 2:\nStrengths:\n- The code is readable and easy to understand.\n- The use of a list (previous) to store the previous numbers is a good approach.\n\nWeaknesses:\n- The code does not handle the case when n is 0, which may cause an IndexError.\n- The function name \"fibonacci\" is good, but the variable names \"previous\" and \"fibonacci_list\" could be more descriptive.\n- The comments are not as helpful as they could be, and some are redundant.\n\nSuggestions for improvement:\n- Add a condition to handle the case when n is 0.\n- Consider renaming the variables \"previous\" and \"fibonacci_list\" to more descriptive names, such as \"fib_numbers\" and \"fib_sequence\".\n- Improve the comments to better explain the logic and functionality of the code.", "score": [8.0, 7.0]}
+{"review_id": "Ej26XkfXb9Jjv2BuRe2Kox", "question_id": 65, "answer1_id": "Lt56JzWfXBxbvxrLf4fn3t", "answer2_id": "fimg24Eg23yjtVd7pYfo8A", "reviewer_id": 2, "metadata": {}, "text": "8 9\n\nAssistant 1:\nStrengths:\n- The code is accurate and efficient.\n- The explanation of the algorithm is clear and concise.\n\nWeaknesses:\n- The function signature is not user-friendly, requiring the user to input low and high values.\n- The code lacks comments explaining its logic and functionality.\n\nSuggestions for improvement:\n- Modify the function signature to only require the array and target value as input.\n- Add comments to the code to explain the logic and functionality.\n\nAssistant 2:\nStrengths:\n- The code is accurate, efficient, and user-friendly.\n- The example usage is helpful for understanding how to use the function.\n\nWeaknesses:\n- The code lacks comments explaining its logic and functionality.\n\nSuggestions for improvement:\n- Add comments to the code to explain the logic and functionality.", "score": [8.0, 9.0]}
+{"review_id": "S3W4ChAjMwHQcQDirRSQad", "question_id": 66, "answer1_id": "bmaShWUyEqPE6kbHD6FjLk", "answer2_id": "7bsZbZ9E9jjwgGhnTiTyWS", "reviewer_id": 2, "metadata": {}, "text": "8 6\n\nAssistant 1:\nStrengths:\n- The code is clean and easy to understand.\n- The implementation is efficient and correctly implements the problem statement.\n- The use of `__len__` and `__contains__` methods is a nice touch, providing additional functionality.\n\nWeaknesses:\n- The `__iter__` method is not implemented correctly, as it only iterates over the `in_stack`, not taking into account the elements in the `out_stack`.\n- There are no comments explaining the code.\n\nSuggestions for improvement:\n- Fix the `__iter__` method to correctly iterate over both stacks.\n- Add comments to explain the logic and functionality of the code.\n\nAssistant 2:\nStrengths:\n- The code is relatively easy to understand.\n\nWeaknesses:\n- The implementation does not correctly solve the problem statement, as it does not use two stacks to simulate a queue.\n- The code is not efficient, as it uses the `pop(0)` method, which has a linear time complexity.\n- There are no comments explaining the code.\n\nSuggestions for improvement:\n- Use two stacks to simulate a queue, as described in the problem statement.\n- Replace the `pop(0)` method with a more efficient alternative, such as using `pop()` and `append()`.\n- Add comments to explain the logic and functionality of the code.", "score": [8.0, 6.0]}
+{"review_id": "6Bi7fdMBbdYmGQMC8MwbNQ", "question_id": 67, "answer1_id": "hiSVom5YqSrKZKor8CjHdU", "answer2_id": "TMYj69PfzLFm6S8ahKbg5J", "reviewer_id": 2, "metadata": {}, "text": "6 4\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The function has a clear input and output format.\n- The comments are helpful and explain the code's logic and functionality.\n\nWeaknesses:\n- The code uses a set, which is an extra data structure, contradicting the problem statement.\n- The algorithm's time complexity is O(n^2), which can be slow for large arrays.\n\nSuggestions for improvement:\n- Remove the use of the set and find a way to print the common elements directly without storing them in an extra data structure.\n- Consider sorting the arrays first and then using a more efficient algorithm to find the common elements.\n\nAssistant 2:\nStrengths:\n- The approach is explained in a step-by-step manner, which is easy to understand.\n\nWeaknesses:\n- The answer does not provide actual code, only a high-level description of the algorithm.\n- The algorithm's time complexity is not mentioned, making it difficult to evaluate its efficiency.\n\nSuggestions for improvement:\n- Provide actual code that implements the described algorithm.\n- Mention the time complexity of the algorithm and discuss its efficiency.\n- Include comments in the code to explain the logic and functionality.", "score": [6.0, 4.0]}
+{"review_id": "WNS6VMHprbnByRUazJvJXm", "question_id": 68, "answer1_id": "JRHeURg4TrVAgWNp5yqjFx", "answer2_id": "NDXxGeVFruYsJ3NZ7zjJ3n", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, let's evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe problem-solving process is correct, and the final answer is 39.\n\nAssistant 2:\nThe problem-solving process is incorrect. The mistake is in the calculation of 5(2)^3, which should be 5(8) = 40, not 10. The correct answer should be 39, not 2.\n\nScores:\nAssistant 1: 10\nAssistant 2: 4\n\nAssistant 1 performed better in this case, as their problem-solving process was correct and led to the correct answer.", "score": [10, 4]}
+{"review_id": "WLMvbxBcXdRVegTrVpXVhG", "question_id": 69, "answer1_id": "hZwFKeRYBLKihBEWSEGMYJ", "answer2_id": "g3NBgumbbZBxnCMCPj7kof", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n\nDistribute the 5 on the right-hand side of the equation:\n\n3x + 10 = 5x - 10\n\nSubtract 3x from both sides:\n\n10 = 2x - 10\n\nAdd 10 to both sides:\n\n20 = 2x\n\nDivide both sides by 2:\n\nx = 10\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe steps followed by Assistant 1 are correct until the step where they combine like terms. They made a mistake in that step:\n\n3x + 10 = 5x - 10\n-2x = -20 (not -10)\n\nDivide both sides by -2:\n\nx = 10\n\nThe correct solution is x = 10, not x = 5.\n\nAssistant 2:\nAssistant 2's answer is completely incorrect. They made a mistake in the first step by subtracting 5(x - 2) from both sides of the equation, which is unnecessary and leads to a wrong solution.\n\nScores:\nAssistant 1: 8\nAssistant 2: 2\n\nAssistant 1 performed better, but they made a mistake in the step where they combined like terms. Assistant 2's answer is completely incorrect.", "score": [8, 2]}
+{"review_id": "W9cUFLLoUFeDrN5XcAAF64", "question_id": 70, "answer1_id": "iG4SJq9xCKCDDtiqU3szJZ", "answer2_id": "jwp2XMxq8PTKTJiZCATHnG", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nThe length of a line segment is the distance between its endpoints. The distance formula is:\n\n$$d = \\sqrt{(x_2 - x_1)^2 + (y_2 - y_1)^2}$$\n\nwhere $(x_1, y_1)$ and $(x_2, y_2)$ are the coordinates of the endpoints. In this case, $(x_1, y_1) = (2, -2)$ and $(x_2, y_2) = (10, 4)$. Substituting these values into the distance formula, we get:\n\n$$d = \\sqrt{(10 - 2)^2 + (4 - (-2))^2} = \\sqrt{8^2 + 6^2} = \\sqrt{100} = 10$$\n\nTherefore, the length of the line segment is 10 units.\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe problem-solving process is correct, and the final answer is also correct. Assistant 1 used the distance formula properly and arrived at the correct answer of 10 units.\n\nAssistant 2:\nThe problem-solving process is incorrect. Assistant 2 subtracted only the x-coordinates of the endpoints, which is not the correct way to find the length of a line segment. The correct method is to use the distance formula, as shown in Assistant 1's answer. The final answer provided by Assistant 2 is incorrect.\n\nScores:\nAssistant 1: 10\nAssistant 2: 2\n\nAssistant 1 performed better in this case, as they used the correct method and arrived at the correct answer.", "score": [10, 2]}
+{"review_id": "ULCPz5GaBthLgaNxkXqtSp", "question_id": 71, "answer1_id": "CjhV8cJ6UEp7Y4f3eWB6E5", "answer2_id": "B6CrSifSJDEHUYiBaiHELR", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. They each provided a well-written formal email template that the user can use to propose a joint venture to a potential business partner. Both responses included important details such as the benefits of the joint venture, the strengths of each company, and an invitation to discuss the proposal further. The quality of both responses is very similar, and both would be useful to the user, which is why they both receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "aCjwKFouTp3kTbB7PjfJB5", "question_id": 72, "answer1_id": "5waLH9pZsr8jVTpy67QH58", "answer2_id": "932DTWDKnBbQDQYTW6be87", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate resignation letter templates that express gratitude and maintain a positive tone. Both templates are detailed and can be easily customized to fit the user's specific circumstances. The quality of both answers is very similar, and both assistants have demonstrated a high level of competence in addressing the user's request.", "score": [9.0, 9.0]}
+{"review_id": "X8ZCJE3UBFYcqzosaxouqy", "question_id": 73, "answer1_id": "ZV4sUNujQGS2w7Wg4jbXbf", "answer2_id": "jVEfncaU9LitqggtYBGuik", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the question. Assistant 1's answer was concise and well-structured, covering the necessary elements of a letter of recommendation. However, Assistant 2's answer provided a more detailed and comprehensive response, including the sender's and recipient's addresses, email addresses, and dates, which are essential components of a formal letter. Additionally, Assistant 2's answer included more specific examples of the student's skills and qualities, making it a stronger recommendation letter.", "score": [8.0, 9.0]}
+{"review_id": "c2uR8GstuonvMWQcgLJmRZ", "question_id": 74, "answer1_id": "X94xm9m7tayWsKFNACqAG4", "answer2_id": "NQSftH48RoRxTKLCqNbPtt", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. Assistant 1's response was concise and to the point, with a clear structure and a call to action for a free trial. Assistant 2's response was more detailed, providing a list of key features and emphasizing the benefits of the software solution. Assistant 2's response also had a more engaging tone, which could potentially be more compelling to the customers. While both responses were of high quality, Assistant 2's answer was slightly better due to the additional details and engaging tone.", "score": [8.0, 9.0]}
+{"review_id": "NHGF2S97Ts2pkmQADC5Hkz", "question_id": 75, "answer1_id": "EFL7fU6gnicQY6DRpHvbTK", "answer2_id": "YeYDnz2kVYAfsz7vCfgLNS", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. Assistant 1's response was concise and covered the necessary points, including an apology, explanation of the issue, and reassurance that the issue has been resolved. Assistant 2's response was more detailed and included a subject line, which is helpful for an email. Additionally, Assistant 2's response emphasized the company's commitment to resolving the issue and preventing future delays. Both responses were well-written, but Assistant 2's answer was slightly more comprehensive and empathetic, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "YW2GwsUYGYTa4a9GhaqLef", "question_id": 76, "answer1_id": "XJLFjCr6vgQZjEXbaMjTa6", "answer2_id": "o2LgSDUwp4b4y4iCteh3RR", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided relevant, accurate, and detailed responses to the user's request for a script about the history and cultural significance of jazz. Assistant 1's answer was more structured and provided a clear outline of the topics covered, including the history, roots, and cultural significance of jazz. Assistant 2's answer was more focused on the visual and storytelling aspects of a YouTube video, which is also helpful, but slightly less comprehensive in terms of content. Both answers were well-written and informative, but Assistant 1's response was slightly more helpful and detailed, earning it a higher score.", "score": [9.0, 8.5]}
+{"review_id": "2kjsk9BGzFXu2J4nxosChf", "question_id": 77, "answer1_id": "bz3rS9MRs9QmaLGg4A8a7B", "answer2_id": "CBvZQ8RJG6AqhUBSoUDbqU", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate information about a trip to Hawaii, highlighting cultural experiences and must-see attractions. Assistant 1's answer was well-structured and provided a clear list of cultural experiences and attractions. However, Assistant 2's answer was more engaging and had a personal touch, making it feel more like a travel blog post. Assistant 2 also mentioned specific locations like the Polynesian Cultural Center and Pearl Harbor Memorial, which added more depth to the response. Therefore, Assistant 2 receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "FFcF8oStUCDydKjJPYcq3K", "question_id": 78, "answer1_id": "kCS2ZqzKTdDpaXwu3TiFXP", "answer2_id": "VfeFmV9H6SEuotYqEf5ri5", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and captivating review, discussing the plot, characters, and special effects with specific examples. The review also mentioned the film's score, which added depth to the response. Assistant 2's answer was less detailed and did not provide specific examples of the special effects or character interactions. While both answers were relevant and accurate, Assistant 1's response was more helpful and comprehensive.", "score": [9.0, 7.0]}
+{"review_id": "gdwG43MAjkq3PBrt9k69GK", "question_id": 79, "answer1_id": "7WWWS5UhWcVzkhTypqcFU4", "answer2_id": "ZLYhNQxJXycBZL56RbdGh3", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and detailed podcast scripts discussing the influence of streaming platforms on the music industry. Assistant 1's script was structured with an intro, body, and conclusion, while Assistant 2's script included interviews with artists and industry insiders, providing different perspectives. Both scripts were well-organized and covered the topic effectively, making it difficult to choose one over the other.", "score": [9.0, 9.0]}
+{"review_id": "QzoYQ4DUQg2BJksx5Lb6qv", "question_id": 80, "answer1_id": "QcSFqbgqLWqn96V6BvWT7B", "answer2_id": "kZw2ii8HQtrQp4d2bK5cHy", "reviewer_id": 1, "metadata": {}, "text": "9 7.5\nAssistant 1 provided a more detailed and engaging review of a specific concert, mentioning the Boston Symphony Orchestra, the pieces performed, and the audience's reaction. The answer was well-structured and gave a clear picture of the concert experience. Assistant 2, although providing a general example of a symphony concert review, lacked the specificity and detail that Assistant 1 provided. The response was still relevant and well-written, but it didn't create the same vivid picture of the concert experience as Assistant 1's answer.", "score": [9.0, 7.5]}
diff --git a/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_gpt35_vicuna-7b.jsonl b/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_gpt35_vicuna-7b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..448302b1e3b307b73c536269175c9916e21c7c4a
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_gpt35_vicuna-7b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "Wnwh2kroNyrsLaetRKrYYv", "question_id": 1, "answer1_id": "BZGowHM7L3RvtWRktKZjLT", "answer2_id": "8haRLA69xGcHc5woRHTj2f", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about improving time management skills. Assistant 1's answer is slightly more detailed and organized, with a clear list of tips and a concise explanation for each. Assistant 2's answer also provides valuable tips, but the structure is less clear, and some points are repetitive (e.g., setting goals and creating a schedule). Both answers cover essential aspects of time management, but Assistant 1's response is more polished and easier to follow, which is why it receives a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "TwRUcTSCYNVToWQmEVUgDj", "question_id": 2, "answer1_id": "GsrPLfiNEcRDYAC5aUoY23", "answer2_id": "j7DV4tZTXhmRFj9zL5TSwq", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective ways to deal with stress. They both covered similar points, such as exercise, relaxation techniques, sleep, healthy diet, social support, and time management. Assistant 1 mentioned seeking professional help, while Assistant 2 discussed avoiding substance abuse and taking breaks. Both answers were detailed and well-organized, making it easy for the reader to understand and apply the suggestions. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "GEuJdPYbEnuRSEHVYWbr24", "question_id": 3, "answer1_id": "5SGfexmPoJTWoVbPMB87KB", "answer2_id": "fEq2fYvNcpaENuQFRMFdan", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more concise and organized, making it easier to understand the main differences between Python and JavaScript. Assistant 2's answer covered similar points but was less structured and included some inaccuracies, such as stating that Python is a strongly typed language when it is actually dynamically typed. Overall, both answers were informative, but Assistant 1's response was more precise and clear.", "score": [8.0, 7.5]}
+{"review_id": "7CXpXu84qL52stqdQCAftN", "question_id": 4, "answer1_id": "RcqQg364kgCkYPshqvmGSp", "answer2_id": "VYKmdXnESAKV3FRutDUPWU", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about increasing productivity while working from home. Both answers included practical tips and covered essential aspects such as setting a schedule, creating a dedicated workspace, taking breaks, and minimizing distractions. The level of detail in both answers is sufficient to guide someone looking to improve their productivity. Assistant 2's answer included a few additional tips, such as using noise-cancelling headphones and staying physically active, which slightly enhanced the response. However, both answers are of high quality and deserve a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "C2EGwWuLN85atUPpLF25Fx", "question_id": 5, "answer1_id": "3R9jAndzLHQqJwmMi5mNox", "answer2_id": "maL9a3rivWyWZk3UgwQTVR", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about the basics of quantum computing. They both explained the concept of qubits and their ability to exist in multiple states simultaneously, as well as the potential applications and advantages of quantum computing. Assistant 1 mentioned different technologies used to build quantum computers, while Assistant 2 discussed the principles of superposition and entanglement in more detail. Both answers were well-rounded and informative, so they both receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "8AdgnvPaGweGPULWN38Zj3", "question_id": 6, "answer1_id": "Uw8SaYLPMGR5sdV9FDx8pb", "answer2_id": "aGRf8RjpUgneLvw4Uf93do", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was more concise and well-structured, making it easier to understand the main differences between plant-based and animal-based protein sources. Assistant 2's answer was also informative but slightly repetitive, which made it less concise. Both assistants covered the main differences in terms of nutrient composition, digestibility, and environmental impact. However, Assistant 1 mentioned the higher protein needs of certain individuals, which added a bit more depth to the answer.", "score": [9.0, 8.5]}
+{"review_id": "i7kXT538M8Shr228ufbWyH", "question_id": 7, "answer1_id": "53gmokt2KBgKu6NMPopxcu", "answer2_id": "oXtzronC4mdVKH9J59ofij", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about developing critical thinking skills. Both answers included practical tips and strategies that can be applied to improve critical thinking. The level of detail in both responses was sufficient, and they covered similar points, such as asking questions, analyzing assumptions, considering different perspectives, and reflecting on one's own thinking. Both answers were well-structured and easy to understand. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "2RHzwZ5XPfEqdZsXxXzr8C", "question_id": 8, "answer1_id": "bKrbrGsN7zjKLvBk2h3tvo", "answer2_id": "dE5c99j9hW9qDvjjPxUPzc", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about the major challenges faced by the education sector today. Assistant 1's answer was well-organized and covered seven key challenges, while Assistant 2's answer expanded on these points and included an additional challenge (international comparisons and rankings), making it slightly more detailed. Both answers were clear and concise, but Assistant 2's answer provided a more comprehensive overview of the challenges, which is why it receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "MCYEFBfC6bnQCcepaeKZs3", "question_id": 9, "answer1_id": "HEGL3aPUnNrdNtNt3XLDKi", "answer2_id": "oLRzkYUv8ooSJJLqfPnrxd", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-organized and covered the main factors influencing consumer behavior, including personal, psychological, social, situational, and marketing mix factors. However, Assistant 2's answer was more comprehensive, covering a wider range of factors such as economic, technological, environmental, health, legal, and public opinion factors. Assistant 2's answer also provided a slightly higher level of detail in some areas. While both answers were informative, Assistant 2's answer was more complete and detailed, earning a higher score.", "score": [8.0, 9.0]}
+{"review_id": "LPvhvHAFbYdmENeSPnr4QE", "question_id": 10, "answer1_id": "W9zpMVa2cJUJW8B2uGMCJy", "answer2_id": "hi7Gu2XPwcThie58TvvkK8", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective strategies for conflict resolution in the workplace. Assistant 1's answer was well-organized and concise, covering seven key strategies. Assistant 2's answer was more detailed and provided a broader range of strategies, totaling ten. While both answers were helpful, Assistant 2's answer was slightly more comprehensive and provided additional strategies, such as addressing power imbalances, fostering a positive workplace culture, and providing training, which contributed to a higher score.", "score": [8.0, 9.0]}
+{"review_id": "TwQpcn49MnEgLZ2ByxVNWv", "question_id": 11, "answer1_id": "LacdmoweqKYGzt3aMBYjEa", "answer2_id": "Xx5PB6u9sBagzxtB2YUKq8", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed and organized, with a clear distinction between the implications of single-use plastic bottles and reusable bottles. Assistant 1 also mentioned the impact on workers involved in the production and disposal of single-use plastic bottles, which added to the comprehensiveness of the response. Assistant 2's answer was also informative and covered the main points, but it was not as well-structured as Assistant 1's response. Both assistants provided valuable information, but Assistant 1's answer was slightly more comprehensive and well-organized.", "score": [9.0, 8.5]}
+{"review_id": "JyyvStDfsG6n8LoRcMxVwx", "question_id": 12, "answer1_id": "JqVreebbPuNdjw8E8K4Ssf", "answer2_id": "FfaUTMS95MuGQQRDefvVzj", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both covered important factors to consider when designing an inclusive and accessible public transportation system, such as physical accessibility, communication accessibility, and employee training. Assistant 1 mentioned sensory inclusivity and universal design, while Assistant 2 discussed route and schedule accessibility, service animals, dissemination of information, and continuous improvement. Both answers are detailed and informative, and they complement each other well. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "jwcwr97UYxkXek3kY4TMAp", "question_id": 13, "answer1_id": "hEMThhsN85Ud5X8xBv9BZJ", "answer2_id": "WgCpMqMPUb9TU8jCuiExg3", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed and organized, with clear distinctions between fiscal and monetary policies and specific examples of how they can be used to combat economic recessions. Assistant 2's answer also covered the main points, but it was not as well-structured and included some redundant information. Overall, both answers were informative and useful, but Assistant 1's answer was slightly more comprehensive and well-organized.", "score": [9.0, 8.5]}
+{"review_id": "LdFFTcaF3ZMNNPm5wAy4YT", "question_id": 14, "answer1_id": "BvFV7sx53PAK5bNn89urFs", "answer2_id": "ATkPcXKbAki2VCoopjq6c3", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer is well-structured and covers the main points regarding language and cultural barriers, as well as mentioning potential solutions. However, Assistant 2's answer goes into greater detail by providing specific examples of how these barriers can affect communication and relationships, such as stereotypes, prejudice, discrimination, and power dynamics. This additional information makes Assistant 2's answer slightly more comprehensive and informative, resulting in a higher score.", "score": [8.0, 9.0]}
+{"review_id": "JzwGgvuC4pcFm8PTaNL9az", "question_id": 15, "answer1_id": "dM5GHbLuPNfzUbBnJz6w7K", "answer2_id": "TFh5bXFdG4fdK5hmq6qS6o", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on a specific scenario involving AI-powered chatbots for patient triage and routine tasks, which was well-explained and detailed. Assistant 2 discussed two scenarios: disease diagnosis and treatment planning, and predictive analytics. Both scenarios were relevant and accurate, but Assistant 2's answer could have benefited from more specific examples or details. Assistant 1 receives a 9 for the focused and detailed response, while Assistant 2 receives an 8.5 for providing multiple scenarios but with slightly less detail.", "score": [9.0, 8.5]}
+{"review_id": "3oT5Lv3vEc55iYMSY6YaVD", "question_id": 16, "answer1_id": "BX7maaP5kGY6bBTLJRwkit", "answer2_id": "XDV7jFB36qKGzPXPcwvbQy", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more comprehensive, as it discussed the potential applications of CRISPR-Cas9 technology in more detail and provided a clearer explanation of the ethical concerns. Assistant 2's answer also covered the main points, but it was less detailed in discussing the potential applications and ethical implications. Both answers were well-structured and informative, but Assistant 1's answer was more complete and well-rounded, earning it a higher score.", "score": [9.0, 8.0]}
+{"review_id": "REncgcRuCE8bWcvfYe2XXZ", "question_id": 17, "answer1_id": "STuX8oc7Gu3SN6EWzwpUpp", "answer2_id": "6E3YAfxqckwL83dVo6ZRP4", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both explained how vaccinations work by introducing a weakened or deactivated form of a virus or bacteria into the body, which triggers the immune system to create a defense against it. They also both explained the concept of herd immunity and its importance in protecting individuals and communities from infectious diseases. The level of detail in both answers is sufficient to provide a clear understanding of the topic. The only minor difference is that Assistant 2 mentioned that it takes several weeks for the immune system to develop sufficient antibodies, which adds a bit more information to the answer. However, this difference is not significant enough to affect the overall scores, and both assistants deserve a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "TEKQwEBx83FPgZ8GhFdNxp", "question_id": 18, "answer1_id": "TFUUXWS7yn2u2b4n7eM3ZB", "answer2_id": "FjSXpLx6FfHU8zN9mb8ucX", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and concise, discussing the democratization of information and the implications of misinformation. However, Assistant 2's answer provided a more detailed analysis of the positive and negative aspects of social media platforms, including the role of algorithms in promoting sensational content and the measures taken by platforms to combat misinformation. This additional detail and context make Assistant 2's answer slightly more informative and comprehensive, resulting in a higher score.", "score": [8.0, 9.0]}
+{"review_id": "34uMyH5hTLRbvGUJLB2V9N", "question_id": 19, "answer1_id": "3yRq2XXPi83H7Rr5SZS9rE", "answer2_id": "HJczP2JdM4s6cxZyBWVyNZ", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed and offered specific examples and strategies for promoting healthier diets, such as raising awareness of plant-based diets, imposing taxes on unhealthy foods, and increasing funding for community gardens and farmers' markets. Assistant 2's answer was also informative and touched on similar points, but it was less detailed and provided fewer specific examples. Both answers were well-structured and addressed the cultural, social, and economic factors influencing food choices.", "score": [9.0, 8.0]}
+{"review_id": "DLFSXCJPCNaWmyqGNb95sE", "question_id": 20, "answer1_id": "Sw34dAwQPCfGGotwRwhvtv", "answer2_id": "X2EFE34dc5vtFSTGr3n2Bg", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question. They both explained the process of natural selection and its contribution to the evolution and adaptation of species. Assistant 1's answer was concise and clear, while Assistant 2's answer provided a step-by-step explanation of the process. Both answers complement each other and provide a comprehensive understanding of the topic.", "score": [9.0, 9.0]}
+{"review_id": "2jKBkX4TXMrxM3bBUcB9fV", "question_id": 21, "answer1_id": "cZw4Jw8Zyz6ZUy4WDsC6ta", "answer2_id": "iJjKWygtpHaLJirgK2PACK", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and immersive response, giving a clear example of how a medieval knight would introduce themselves at a royal banquet. The answer was relevant, accurate, and engaging. Assistant 2's response was also relevant and accurate, but it lacked the level of detail and immersion that Assistant 1's response provided. Additionally, Assistant 2's answer focused more on the actions of the knight rather than the actual introduction, which was the main focus of the question.", "score": [9.0, 7.0]}
+{"review_id": "QzBv2zkBLPxAjsj4BCCod5", "question_id": 22, "answer1_id": "nj9y2HTWFGsD5B278ozm73", "answer2_id": "FRZfkefyPRGCSpLEexQoRH", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided relevant and motivating answers to the question. Assistant 1's answer was slightly more detailed and focused on the roles each crew member would play, while Assistant 2's answer emphasized the crew's past experiences and the potential for fame. Both answers were accurate and appropriate for a pirate captain's motivational speech. Assistant 1's answer receives a 9 due to its slightly higher level of detail, while Assistant 2's answer receives an 8.5 for its strong motivational content.", "score": [9.0, 8.5]}
+{"review_id": "9jADJCE55kXaVW6XCJo5Uk", "question_id": 23, "answer1_id": "Ep9rLcNzEGhG7AgbLTpRtm", "answer2_id": "HBah6W9KuR8eNpRQJUxVvd", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided relevant, accurate, and detailed responses to the user's question. They both composed soliloquies in the style of Shakespearean characters declaring their love. Assistant 1's soliloquy was slightly more poetic and used more Shakespearean language, which is why it received a higher score. Assistant 2's soliloquy was also well-written and in the style of Shakespeare, but it was not as rich in imagery and poetic language as Assistant 1's response. Both responses were helpful and appropriate for the user's question.", "score": [9.0, 8.5]}
+{"review_id": "VkJnm8PK8FzgY2BkCnw2mB", "question_id": 24, "answer1_id": "oNULT72cYUvit7D9SHb5aM", "answer2_id": "3Rgw9vMLyMiwazfdjhWcgT", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was more focused on explaining the origin story in a simple and engaging way for a child, emphasizing the importance of courage and helping others. Assistant 2's answer provided more specific details about the superhero's powers and origin, but it was slightly less focused on addressing the child's curiosity. Both answers were well-written and informative, but Assistant 1's response was slightly more tailored to the intended audience.", "score": [9.0, 8.5]}
+{"review_id": "gz3uDzMZrbEmmaPAX5iSEv", "question_id": 25, "answer1_id": "TX86xjPKTk2UxWwV4e8zRK", "answer2_id": "b4oghpgzhWMdoryzQrSwuF", "reviewer_id": 1, "metadata": {}, "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and helpful information about technological advancements in the year 3000. Assistant 1's answer was more concise and focused on five major advancements, while Assistant 2's answer covered a broader range of ten advancements. Assistant 1's response was more detailed in explaining the impact of each technology on society, whereas Assistant 2's response provided a brief overview of each technology. Both answers were accurate and relevant to the question, but Assistant 1's answer was slightly more detailed and organized, which is why it receives a higher score.", "score": [8.0, 7.0]}
+{"review_id": "SpzX5YCaiwaGFKWLnEYNDx", "question_id": 26, "answer1_id": "e5YFb6PojDThkcZdSH8kpC", "answer2_id": "GzzDrjBAe3BnXWgWrATxJL", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided relevant and engaging descriptions of the winning play in the final seconds of a championship game. Assistant 1's answer was slightly more detailed and vivid, with a clear description of the star player's actions and the ball's trajectory. Assistant 2's answer was also relevant and accurate, but it lacked the same level of detail and excitement as Assistant 1's response. Both answers captured the tension and excitement of the moment, but Assistant 1's answer was more immersive and precise, which is why it received a slightly higher score.", "score": [8.0, 7.5]}
+{"review_id": "GJWNKPhrrmDZsCpq3M73dm", "question_id": 27, "answer1_id": "NnkbyoNFRHay4toKTFfaeE", "answer2_id": "f7hUYhajUbXNs3gQrG9z3b", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and engaging description of the signature dish, including specific ingredients, cooking techniques, and the overall flavor profile. The response was well-structured and painted a vivid picture of the dish, making it more appealing to the judges. Assistant 2's answer, while still relevant and accurate, lacked the same level of detail and engagement. It focused more on general aspects of the dish, such as the balance of flavors and presentation, but did not provide specific information about the ingredients or preparation methods. Overall, Assistant 1's response was more helpful and precise in describing the signature dish.", "score": [9.0, 7.0]}
+{"review_id": "bQCfAwpaAi6yHDz3dBr5Uy", "question_id": 28, "answer1_id": "Gpb8MuNU3Pt7k93dpRN9WM", "answer2_id": "LpUrrJuQ4cA6LtNRmiTfRv", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information about the summit of Mount Everest and the emotions a climber might experience. Assistant 1 started by clarifying their limitations as an AI language model, which is helpful for setting expectations. They then provided a detailed description of the summit, the challenges faced, and the breathtaking view. Assistant 2 also provided a vivid description of the view from the summit and the emotions a climber might experience. The main difference between the two answers is that Assistant 2's response is slightly more detailed and immersive, providing a more engaging description of the view and the emotions involved. This is why Assistant 2 receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "J2Pcp6UEJywVSYRJXgdEiG", "question_id": 29, "answer1_id": "SYvkCCHBUZPd9DQuidZM8K", "answer2_id": "Yiup49xrP6jf9nsEuGHgdN", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, covering a broader range of challenges and providing a more vivid picture of daily life on Mars. Assistant 2's answer was also informative but focused more on the challenges faced by a space colonist, without providing as much detail about daily activities. Both answers were well-structured and addressed the main aspects of the question, but Assistant 1's response was slightly more comprehensive and engaging.", "score": [9.0, 8.5]}
+{"review_id": "iJfTwGGZQaXiP4QjF3XwQz", "question_id": 30, "answer1_id": "NjdsG8tYfrHMT5zGZPavk6", "answer2_id": "7cWm5Kop6bLzwLgJjpAVrK", "reviewer_id": 1, "metadata": {}, "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and engaging answers to the user's question. Assistant 1's answer was more detailed and immersive, creating a vivid picture of the post-apocalyptic world and the relationships formed with allies. Assistant 2's answer was more general and focused on the skills and resources needed to survive, but it was still relevant and helpful. Assistant 1's answer was slightly more engaging and detailed, which is why it received a higher score.", "score": [8.0, 7.0]}
+{"review_id": "gpbgmZwwLYk9WZNVGj6Kuu", "question_id": 31, "answer1_id": "8eovAhyvrKJEMWiVdYzByH", "answer2_id": "YaUHhigGUvgv82Js3ktFgs", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, providing a clear step-by-step approach to determining if a restaurant is popular among locals or mainly attracts tourists. Assistant 1 also discussed the usefulness of this information for tourists, local residents, and business owners, which added more depth to the answer. Assistant 2's answer was also helpful and relevant, but it was slightly less detailed and focused more on the indicators rather than the reasons why this information might be useful. Overall, both assistants provided valuable information, but Assistant 1's answer was more comprehensive and detailed.", "score": [9.0, 8.5]}
+{"review_id": "8gGAuoHkHSXj27Dtrs53Pq", "question_id": 32, "answer1_id": "nvyaGEveLWBaxgXzriB93d", "answer2_id": "LaHQYWhmXF7mnPSVFdhCeq", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more concise and easier to follow, with a clear list format. Assistant 2's answer was also informative and detailed, but the formatting was less organized, making it slightly harder to follow. Both assistants covered similar points, but Assistant 1's response was more straightforward and to the point, which is why it receives a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "7wB7MDWbkVAy5zF7Jwqtbg", "question_id": 33, "answer1_id": "3xU2t6Yvx9EWpqfqvinNfH", "answer2_id": "Br2uFCYmRUaQULwKzpffz9", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and covered a variety of reasons why someone might choose to use a paper map or ask for directions. The answer touched on aspects such as power requirements, reliability, geography, tactile experience, and personal growth. Assistant 2's answer also covered various reasons, including privacy concerns, detailed information, offline functionality, human interaction, and battery life. Both answers were detailed and informative, but Assistant 2's answer slightly edged out Assistant 1's due to the inclusion of privacy concerns and the emphasis on human interaction, which added more depth to the response.", "score": [8.0, 9.0]}
+{"review_id": "A7wbvsh2EpUhU7Tyha9aa2", "question_id": 34, "answer1_id": "Mq6hzNziUxzQ2juPMDrv3h", "answer2_id": "FCRqJu6DgRvCNq4Z2NneHf", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more structured and concise, with clear points and examples. Assistant 2's answer was also informative and detailed, but the points were not as clearly separated, and the answer seemed slightly repetitive. Both assistants covered the importance of active listening, engagement, and body language. However, Assistant 1's answer was more precise and easier to follow, which is why it receives a slightly higher score.", "score": [9.0, 8.5]}
+{"review_id": "8MrjJsKfD3J63dpw9mUwgg", "question_id": 35, "answer1_id": "KU6BNNN8d6MLHyrA8nV4DB", "answer2_id": "Fy5Nw8LcWpdq2GokTbiwuq", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, accurate, and detailed answers to the question. They both listed several reasons why someone might prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher. The reasons provided by both assistants were similar and covered various aspects such as personalized service, supporting the local economy, unique products, and environmental impact. Both answers were well-structured and easy to understand, making it difficult to differentiate between the two in terms of quality. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "eiyje8eeDhZMNhib2KWnZT", "question_id": 36, "answer1_id": "RpHbPLJamuknRRa3xU5bUF", "answer2_id": "hKhcnEtSjzysU7sbtE3JeH", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed and organized, with a clear list of factors to consider when assessing the credibility of a source. Assistant 2's answer also provided useful tips, but the organization was slightly less clear. Both answers covered important aspects of evaluating credibility, such as checking the author's credentials, looking for secondary sources, and considering the purpose and bias of the publication. Assistant 1's answer included additional points about evaluating the tone of the article and verifying the information using reputable sources, which added value to the response. Overall, both assistants performed well, but Assistant 1's answer was slightly more comprehensive and well-structured.", "score": [9.0, 8.5]}
+{"review_id": "Qu2QPbdwF6jxUWYhVzyTVf", "question_id": 37, "answer1_id": "AFR3AJW4sSPLDLiAUvrL8s", "answer2_id": "cAVZTw5QY8WUnJEd3rUu3p", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was well-structured and touched upon various factors that contribute to individual preferences for fear-inducing experiences. Assistant 2's answer was similar in content but provided a slightly more detailed explanation of the factors influencing a person's enjoyment of being scared, such as the release of endorphins. Both answers were informative, but Assistant 2's answer had a slight edge in terms of detail and clarity, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "YKYWDfU9hCCZPGs2RiD7ad", "question_id": 38, "answer1_id": "esqiBYHa56ygcPU2ux2Pdx", "answer2_id": "9JxYGUzSa2rR68BTVuZJEA", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both covered various aspects of how observing the behavior of others in a social situation can provide clues about cultural norms and expectations. Assistant 1 focused on dress codes, social hierarchy, communication styles, and customs and traditions, while Assistant 2 discussed body language, gender roles, interpersonal distance, table manners, and gift-giving. Both answers were detailed and provided clear examples to illustrate their points. The overall quality of the responses is very similar, and both assistants deserve a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "htjN3RruvxUtzutM3CzKrw", "question_id": 39, "answer1_id": "NmuuKUipqt62QKuEHCuBWh", "answer2_id": "5b3qKj2jtzgcZX7SSipQBW", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided thoughtful, relevant, and accurate answers to the question. They both acknowledged the complexity of the issue and presented arguments for both sides, emphasizing the importance of personal values and priorities in making a decision. Both answers also mentioned the potential benefits of space exploration and the pressing problems on Earth that need to be addressed. The level of detail in both responses is sufficient to provide a comprehensive understanding of the debate. Therefore, both assistants receive a score of 9 for their overall performance.", "score": [9.0, 9.0]}
+{"review_id": "4VzEfU3eyUDAc5QhgLbXSW", "question_id": 40, "answer1_id": "3HypDqXt6tHieMDN7hWYCh", "answer2_id": "hG6i3Q8F7bA7u73vo6t2XZ", "reviewer_id": 1, "metadata": {}, "text": "8 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. They both emphasized the importance of striking a balance between job creation and technological progress. Assistant 1 provided more specific examples of methods for promoting job creation, while Assistant 2 focused more on the potential benefits of automation and the need to equip workers with necessary skills. Assistant 2's answer was slightly more concise, which is why it received a slightly higher score. However, both answers were informative and addressed the question effectively.", "score": [8.0, 8.5]}
+{"review_id": "5LjZqRLm4jvNGjkGD4M3uj", "question_id": 41, "answer1_id": "DmQtupeyNDrQFBccBRAsbD", "answer2_id": "KaZUZ4NAnQ3SY6mq2QY3Kt", "reviewer_id": 1, "metadata": {}, "text": "7 4\nAssistant 1 provided a clear and concise answer, with a step-by-step explanation of the calculations. The answer was relevant and accurate, considering the assumptions made. However, the level of detail could have been improved by discussing the factors that can affect blinking frequency.\n\nAssistant 2's answer was confusing and contained several errors in calculations. The answer was not as relevant or accurate as Assistant 1's, and the level of detail was not helpful due to the errors. The explanation was not easy to follow, and the final result did not match the initial claim of 6.25 million blinks in a lifetime.", "score": [7.0, 4.0]}
+{"review_id": "k9CzUCg3irWWyv4KgUzMgq", "question_id": 42, "answer1_id": "froHv7kwRMYGWPXDQXk2Gw", "answer2_id": "Jwrdb34BnrswtwbeSd6xxP", "reviewer_id": 1, "metadata": {}, "text": "9 3\nAssistant 1 provided a detailed and accurate explanation of the number of atoms in a grain of salt, using scientific formulas, data, and step-by-step calculations. The answer was relevant, precise, and easy to follow. Assistant 2, on the other hand, provided an incorrect and confusing response, discussing the amount of salt in a grain of salt rather than the number of atoms. The information provided by Assistant 2 was not relevant to the question and did not address the user's request for a step-by-step explanation.", "score": [9.0, 3.0]}
+{"review_id": "nytATiEamWMfqdsiWFS9h8", "question_id": 43, "answer1_id": "ahktv9NqxZ2cYquTXwF42r", "answer2_id": "Ubz2V2EhkWmNYPEErdX8gn", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and step-by-step explanation of how the estimated number of lightning strikes per day was calculated, which was the main requirement of the question. The answer also mentioned the factors that contribute to lightning formation and the varying estimates from different sources. Assistant 2's answer, while informative, did not provide a step-by-step explanation as requested and provided a different estimate without explaining how it was derived. Both answers were relevant and accurate, but Assistant 1's response was more helpful and detailed, which is why it received a higher score.", "score": [8.0, 6.0]}
+{"review_id": "T8eKtw9mAFpjgrt9VrwKMF", "question_id": 44, "answer1_id": "kqqPRaFqb3w9Ky9LGB3yKU", "answer2_id": "CufihrnQT3GRic3Vr2rfU2", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more relevant and accurate answer, estimating the weight of the house and the lifting capacity of a single balloon. The answer also took into account the weight of the balloon material, ropes, and other equipment needed to tether the balloons to the house. Assistant 2's answer, while detailed, made several assumptions about the house's dimensions and used an incorrect method for calculating the number of balloons needed. The calculation based on buoyancy force and surface area of the balloons was not relevant to the question, making the answer less accurate and helpful.", "score": [8.0, 6.0]}
+{"review_id": "LEykgnFTYpK4Tv993B6igJ", "question_id": 45, "answer1_id": "946tQg8kS7GYPSm4qcV6Pt", "answer2_id": "fBBFkmgfGSiPtQtBdsVxpZ", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more precise and detailed answer, with specific numbers and calculations to arrive at an estimated number of text messages sent per minute. Assistant 2, on the other hand, gave a more general response, discussing factors that contribute to the high number of text messages sent but not providing a specific number for messages sent per minute. Assistant 1's answer was more helpful and relevant to the user's question, while Assistant 2's answer, although informative, did not directly address the question.", "score": [8.0, 6.0]}
+{"review_id": "XGKF3bH2GvkZwBSZHDobWr", "question_id": 46, "answer1_id": "cU3wut3Ta3ySbRHGxfwgjc", "answer2_id": "eQmVTyJQmoBKrZ9J38EXBC", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and step-by-step explanation, starting with the world population and then using a reasonable estimate for the average daily words spoken per person to calculate the total words spoken daily on Earth. The answer was clear and easy to follow, and the assistant acknowledged the limitations of the estimate. Assistant 2, on the other hand, provided some useful information but did not provide a clear calculation or step-by-step explanation. The answer was less focused and less structured, making it harder to follow and less helpful overall.", "score": [9.0, 7.0]}
+{"review_id": "FUoGbDUbADzyobCHKznDZo", "question_id": 47, "answer1_id": "hQP784Ch2yq2b3BaXVBVX3", "answer2_id": "4JYtxzTN9oUTkVrFy7CDe6", "reviewer_id": 1, "metadata": {}, "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers, explaining the difficulty in determining the exact number of snowflakes that fall during a typical winter. Assistant 1's answer was more concise and provided a clearer estimation by using the average snowfall in the United States as a reference point. Assistant 2's answer was more detailed in explaining the process of snowflake formation and factors affecting the number of snowflakes, but it did not provide a clear estimation or reference point like Assistant 1 did. Both answers were accurate and informative, but Assistant 1's answer was slightly more helpful due to its clearer estimation.", "score": [8.0, 7.0]}
+{"review_id": "nzVpjH45fpZzb8TyRfCBzh", "question_id": 48, "answer1_id": "a92bStUFdq4LBcv3pa9y3Z", "answer2_id": "M7KhQtwQrueKKwuemeJHUb", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and structured response, estimating the total number of pages in all books ever written based on Google's estimation of the number of books and the average number of pages per book. The answer also acknowledged the limitations of the estimation and the fact that it would become outdated quickly. Assistant 2, on the other hand, focused more on the difficulties of determining the exact number of pages and provided a narrower scope by only considering the United States. While both answers acknowledged the impossibility of determining the exact number, Assistant 1's response was more comprehensive and informative.", "score": [8.0, 6.0]}
+{"review_id": "VtS4C3siXuwAe6A6nBDdEz", "question_id": 49, "answer1_id": "a2QAcAm9wJeP2BpyWQnhot", "answer2_id": "AuuvYW4edaZy5wpL5QyppA", "reviewer_id": 1, "metadata": {}, "text": "9 3\nAssistant 1 provided a clear and concise explanation of the calculation, using the correct time frame of 3.8 billion years and the length of one Earth year to arrive at the answer of approximately 3.8 billion orbits. The answer also acknowledges the limitations of the estimate due to factors like gravitational interactions and the changing length of a year. On the other hand, Assistant 2's answer is confusing and incorrect, as it uses irrelevant information such as the Earth's distance from the Sun and the length of a solar orbit, leading to an incorrect answer of 200,000 orbits. Assistant 2's answer also lacks clarity and does not provide a step-by-step explanation of the reasoning. Therefore, Assistant 1 receives a score of 9 for a helpful, relevant, accurate, and detailed response, while Assistant 2 receives a score of 3 for providing an incorrect and confusing answer.", "score": [9.0, 3.0]}
+{"review_id": "XMGTWC7zqY7XLkhsp67ZMy", "question_id": 50, "answer1_id": "CrmusnxbTtGXF2varfcUd4", "answer2_id": "TJpkA2j3kC9akcHtmVQKUF", "reviewer_id": 1, "metadata": {}, "text": "8 6\nAssistant 1 provided a more detailed and comprehensive answer, taking into account the length of human history, modern recording technology, and the diversity of musical traditions around the world. The answer also acknowledged the impossibility of determining an exact number but provided a reasonable estimate in the billions. Assistant 2's answer was less detailed and provided a much lower estimate of tens of thousands, which seems unrealistic given the vast history of music and the number of songs recorded in recent centuries. Assistant 1's answer was more helpful, relevant, and accurate, while Assistant 2's answer lacked the depth and accuracy needed for a thorough response.", "score": [8.0, 6.0]}
+{"review_id": "UqAtoMvbdK3VXe7mNtDsVb", "question_id": 51, "answer1_id": "J9pZp6z2UUW7YcXgzUouqs", "answer2_id": "9FJsZWNGMNuGKGF4YjQoRM", "reviewer_id": 1, "metadata": {}, "text": "9 7\nAssistant 1 provided a more detailed and comprehensive answer, discussing the potential impact of the internet on the intellectual and cultural landscape of the Renaissance period, as well as mentioning the limitations of the technology at the time. The answer also touched upon the role of the printing press and how the internet could have played a similar role in democratizing knowledge. Assistant 2's answer, while still relevant and accurate, was less detailed and focused more on the potential uses of the internet among a small group of people, without discussing the broader implications for the Renaissance period as a whole.", "score": [9.0, 7.0]}
+{"review_id": "MQGxtmdBhtrCoC4sfa8Y6h", "question_id": 52, "answer1_id": "67bYUQb6zru8ofiub7uNUi", "answer2_id": "4vN4PKLUJkaYTEEVzYE3DN", "reviewer_id": 1, "metadata": {}, "text": "8 7\nBoth Assistant 1 and Assistant 2 provided relevant and accurate answers to the question, discussing the potential outcomes if the Aztecs had successfully repelled the Spanish conquistadors. Assistant 1's answer was more concise and focused on the broader implications for the Americas, Europe, and the world, while Assistant 2's answer delved into the potential impact on Aztec society itself. Assistant 1's answer was slightly more helpful and detailed, discussing the possible lack of interest from other colonial powers without the riches of the Aztec empire. Assistant 2's answer, while still relevant, was more speculative and repetitive, emphasizing the uncertainty of the situation. Overall, both answers were informative, but Assistant 1's response was slightly more comprehensive and well-structured.", "score": [8.0, 7.0]}
+{"review_id": "Xf4dURJyhQdrL7PZDxfFsG", "question_id": 53, "answer1_id": "gAisnQTHWFLW8aa5fQPNJf", "answer2_id": "LjnTtmsFEMgFEkYMKE3VRV", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was more concise and focused on the social, economic, and cultural impacts of the Black Death, while Assistant 2's answer explored a wider range of possible scenarios, including political changes, global trade, and environmental impact. Assistant 1's answer was slightly more organized and easier to follow, which is why it received a higher score. However, both answers were informative and provided a good level of detail, making them both valuable responses to the question.", "score": [9.0, 8.0]}
+{"review_id": "hv3zJjDrDu9mY5nfmiY5nr", "question_id": 54, "answer1_id": "4ZJCbj7T8BGzNhDqz7NSF4", "answer2_id": "9L9SgmcPjkFs3CwuiUJqm9", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was more concise and focused on the potential impact of Newton's work on biology, as well as the possible hindrance to the field of physics without his contributions. Assistant 2's answer, on the other hand, provided a more detailed analysis of the potential impact of Newton's work on biology and how it could have led to advancements in various fields. Both answers acknowledged the uncertainty of the hypothetical scenario, but Assistant 2's answer was slightly more comprehensive and provided a broader perspective on the potential implications of Newton's focus on biology.", "score": [8.0, 9.0]}
+{"review_id": "Yk6WEJ69ridVRCZnJ8UKvz", "question_id": 55, "answer1_id": "c6ixri3qqLfSBBnwMkgYB7", "answer2_id": "f9pwVc7bbzajLXKsaGsXQ8", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more concise and focused answer, discussing the potential impact on the music world and the cultural influence of the Beatles. Assistant 2's answer, while still relevant, was less focused and provided a list of possibilities rather than a cohesive response. Both answers were accurate and detailed, but Assistant 1's response was more helpful and relevant to the question.", "score": [8.0, 7.0]}
+{"review_id": "m98hWdPRHbzWJss7KkKMmM", "question_id": 56, "answer1_id": "c9AtDn7eeSYhtH854MQDDB", "answer2_id": "UfZJcVaZLWkVsipvmDBrdd", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, discussing the impact on modern computing and artificial intelligence, which added value to the response. Assistant 2's answer was also informative and accurate, but it did not provide the same level of detail as Assistant 1's answer. Both answers addressed the potential consequences of not cracking the Enigma code, but Assistant 1's answer was more comprehensive, which is why it received a higher score.", "score": [9.0, 8.0]}
+{"review_id": "4yZGxVNaBtbSq9zcJjv7KW", "question_id": 57, "answer1_id": "jYd2gg6MJH8hdqFSAJTaiR", "answer2_id": "K635PkbTDkJTEnDSG68bTh", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1's answer was concise and touched on the economic, social, and political impacts of the Suez Canal not being constructed. Assistant 2's answer provided a bit more context about the Suez Canal and its significance in the 19th century, as well as mentioning the potential development of alternative trade routes. Both answers were accurate and detailed, but Assistant 2's answer was slightly more comprehensive, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "7ZHwhnPAmYAFzygoLRw4Pu", "question_id": 58, "answer1_id": "nZJ6LGJFegnHetutiAQtFm", "answer2_id": "aGEwGV2TQY6snjFdiLkyqZ", "reviewer_id": 1, "metadata": {}, "text": "8 7.5\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information about the Maya civilization and its potential development if it had not collapsed. Assistant 1's answer was slightly more helpful and detailed, as it discussed the possible impact on the colonization of the Americas and the influence on political structures of neighboring civilizations. Assistant 2's answer, while still informative, was more focused on the potential advancements in technology, calendar systems, and mathematics. Both answers acknowledged the speculative nature of the question, but Assistant 1's response seemed more comprehensive and engaging.", "score": [8.0, 7.5]}
+{"review_id": "LDddCfmKX8JnTnU3ZPHSMu", "question_id": 59, "answer1_id": "dmEgLyeYNcwBZWHBak6Lap", "answer2_id": "ksnQVXY9SY2NzMxXEmV3Pd", "reviewer_id": 1, "metadata": {}, "text": "8 7\nAssistant 1 provided a more detailed response, discussing the possibility of other explorers discovering the Americas and the potential impact on native populations and European colonization. Assistant 2's answer was also relevant and accurate, but it was less detailed and focused more on the hypothetical consequences without discussing the likelihood of other explorers discovering the continent. Both answers were helpful and relevant, but Assistant 1's response was more comprehensive and informative.", "score": [8.0, 7.0]}
+{"review_id": "hSph7jZjHfucv9bHLsBj32", "question_id": 60, "answer1_id": "bkuECkuy7YiDUcj9oJjdrZ", "answer2_id": "GZpcvXQyrKUHsgWpzS7Hpo", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was slightly more detailed, discussing the potential impact on van Gogh's personal life, mental health, and the broader art world. Assistant 2's answer also touched on these points but was less specific in discussing the potential influence on other artists and the art world. Both answers acknowledged the uncertainty of the hypothetical scenario, but Assistant 1's response was more comprehensive and well-rounded, earning it a slightly higher score.", "score": [9.0, 8.0]}
+{"review_id": "Qmuy2aXu8wk8QShzseQaQw", "question_id": 61, "answer1_id": "2PRjT7j3V6487xZREfQfuD", "answer2_id": "Czx3sfkw8C4md9YJrhs3gu", "reviewer_id": 2, "metadata": {}, "text": "8 6\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The function `count_occurrences()` is modular and can be reused.\n- The code handles file opening errors.\n- The code converts the text to lowercase, making the search case-insensitive.\n- The code uses a stringstream to tokenize the line into words, which is more accurate than using `find()`.\n\nWeaknesses:\n- The code does not handle punctuation marks, which may cause incorrect word matching.\n\nSuggestions for improvement:\n- Add code to remove punctuation marks from the line before tokenizing it into words.\n\nAssistant 2:\nStrengths:\n- The code is simple and easy to understand.\n- The code reads the file line by line.\n\nWeaknesses:\n- The code does not tokenize the line into words, which may cause incorrect word matching.\n- The code does not handle file opening errors.\n- The code does not handle case sensitivity or punctuation marks.\n- The code does not use a separate function for counting occurrences, which makes it less modular.\n\nSuggestions for improvement:\n- Use a stringstream to tokenize the line into words.\n- Handle file opening errors.\n- Convert the text to lowercase and remove punctuation marks before searching for the word.\n- Create a separate function for counting occurrences to make the code more modular.", "score": [8.0, 6.0]}
+{"review_id": "h9Nopc94SEdRznouyUB9EW", "question_id": 62, "answer1_id": "jWyN8NTdVix6CUoqfbRqVx", "answer2_id": "G7QjnFhVnsMPMN9UES8mTV", "reviewer_id": 2, "metadata": {}, "text": "7 4\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to read.\n- The function correctly implements the dynamic programming approach to find the longest common subsequence.\n- The comments are clear and concise, explaining the logic and functionality of the code.\n\nWeaknesses:\n- The code could benefit from more comments, especially when initializing the 2D array and during the backtracking process.\n\nSuggestions for improvement:\n- Add more comments to explain the initialization of the 2D array and the backtracking process.\n\nAssistant 2:\nStrengths:\n- The code is structured and easy to read.\n\nWeaknesses:\n- The function does not correctly implement the dynamic programming approach to find the longest common subsequence. The code breaks out of the inner loop when characters do not match, which is incorrect.\n- The comments are not sufficient to explain the logic and functionality of the code.\n- The example provided does not produce the correct output.\n\nSuggestions for improvement:\n- Correct the implementation of the dynamic programming approach by removing the \"break\" statement and replacing it with the correct logic to update the dp array.\n- Add more comments to explain the logic and functionality of the code.\n- Provide a correct example that demonstrates the usage of the function.", "score": [7.0, 4.0]}
+{"review_id": "KU29LHj88LGxxHcyzzztNi", "question_id": 63, "answer1_id": "mx9G7gfKTCXCmNRaiMZQr9", "answer2_id": "RHqehPQpeXkN9d6NHkVGxU", "reviewer_id": 2, "metadata": {}, "text": "7 6\n\nAssistant 1:\nStrengths:\n- The provided code is accurate and efficient.\n- The regular expression pattern is more comprehensive and covers a wider range of valid email formats.\n- The code is structured in a function, making it reusable.\n\nWeaknesses:\n- There are no comments in the code to explain the logic and functionality.\n- The print statements in the function could be replaced with return statements to make the function more versatile.\n\nSuggestions for improvement:\n- Add comments to explain the regular expression pattern and the function logic.\n- Replace the print statements with return statements, so the function returns a boolean value instead of printing the result.\n\nAssistant 2:\nStrengths:\n- The code is accurate and efficient.\n- The code is structured in a function, making it reusable.\n\nWeaknesses:\n- The regular expression pattern is less comprehensive and may not cover all valid email formats.\n- There are no comments in the code to explain the logic and functionality.\n- The use of `re.search()` is not necessary in this case, as the `re.match()` function is more appropriate for checking the entire string.\n\nSuggestions for improvement:\n- Improve the regular expression pattern to cover a wider range of valid email formats.\n- Add comments to explain the regular expression pattern and the function logic.\n- Use the `re.match()` function instead of `re.search()` for checking the entire string.", "score": [7.0, 6.0]}
+{"review_id": "in3XxoSrmc2XeAWqkSAXWT", "question_id": 64, "answer1_id": "8LjpXk6Va5L2FBEwTU9YJ6", "answer2_id": "aqYsaowQtr5rYU6iDbRj5x", "reviewer_id": 2, "metadata": {}, "text": "8 7\n\nBoth Assistant 1 and Assistant 2 provided working solutions to find the nth Fibonacci number using dynamic programming. However, there are some differences in their approaches and code quality.\n\nAssistant 1:\n- The code is simple and easy to understand.\n- The function directly returns the nth Fibonacci number from the list.\n- The code is efficient, as it only calculates the required Fibonacci numbers.\n- The explanation provided is clear and concise.\n\nAssistant 2:\n- The code is slightly more complex than Assistant 1's solution.\n- The function returns the last element of the list, which is the nth Fibonacci number.\n- The code is less efficient, as it appends the previous Fibonacci number to the list, which is not necessary.\n- The explanation provided is clear but could be more concise.\n\nSuggestions for improvement:\n- Assistant 1's code is already quite good, but adding a few comments within the code would make it even easier to understand.\n- Assistant 2 should simplify the code by removing the unnecessary `fibonacci_list` and directly return the last element of the `previous` list. Also, adding comments within the code would be helpful for understanding the logic.", "score": [8.0, 7.0]}
+{"review_id": "PuE5Ne8nTEyRmvLkHY96F9", "question_id": 65, "answer1_id": "BLC8fhAUeahFfrjBRiL9tx", "answer2_id": "fimg24Eg23yjtVd7pYfo8A", "reviewer_id": 2, "metadata": {}, "text": "9 9\n\nBoth Assistant 1 and Assistant 2 provided correct and efficient implementations of the binary search algorithm in Python. Their code structures are almost identical, and both are easy to read and understand. Neither of them included comments within the code, but their explanations in the text surrounding the code were clear and concise.\n\nStrengths:\n- Both implementations are correct and solve the given problem statement.\n- The code is efficient, using a binary search algorithm.\n- The code is easy to read and understand.\n\nWeaknesses:\n- Neither implementation included comments within the code itself.\n\nSuggestions for improvement:\n- Although the explanations surrounding the code were clear, it would be helpful to include comments within the code to explain the logic and functionality of each step. This would make it easier for someone reading the code to understand the algorithm without having to refer back to the text explanation.", "score": [9.0, 9.0]}
+{"review_id": "EemZUqydtkqJHhdwwZEo2B", "question_id": 66, "answer1_id": "NAH245JGvMWF24FWZJagYp", "answer2_id": "7bsZbZ9E9jjwgGhnTiTyWS", "reviewer_id": 2, "metadata": {}, "text": "8 5\n\nAssistant 1:\nStrengths:\n- The code is well-structured and easy to understand.\n- The implementation is efficient and correctly solves the problem.\n- The comments are clear and concise, explaining the logic and functionality of the code.\n- Proper coding standards and best practices are followed.\n\nWeaknesses:\n- The code could benefit from a few more comments, especially in the `dequeue` method.\n\nSuggestions for improvement:\n- Add a few more comments to explain the logic in the `dequeue` method.\n\nAssistant 2:\nStrengths:\n- The code is readable and follows proper coding standards.\n- The implementation attempts to solve the problem using two stacks.\n\nWeaknesses:\n- The implementation is not efficient, as it does not make proper use of the two stacks.\n- The code does not correctly implement the problem statement, as the `dequeue` method does not transfer elements from `stack1` to `stack2` when necessary.\n- The comments are not as clear and concise as they could be, making it harder to understand the code's logic and functionality.\n\nSuggestions for improvement:\n- Modify the `dequeue` method to transfer elements from `stack1` to `stack2` when necessary, as shown in Assistant 1's implementation.\n- Improve the comments to better explain the code's logic and functionality.\n- Consider removing the `front` and `rear` variables, as they are not necessary for the implementation.", "score": [8.0, 5.0]}
+{"review_id": "9PeseoRfdydPZZCen7ky5z", "question_id": 67, "answer1_id": "NKXTwB3WtfFcmQZbTxBP5o", "answer2_id": "TMYj69PfzLFm6S8ahKbg5J", "reviewer_id": 2, "metadata": {}, "text": "8 4\n\nAssistant 1:\nStrengths:\n- The code is simple and easy to understand.\n- The function is well-structured and modular.\n- The example provided helps to understand the code's functionality.\n\nWeaknesses:\n- The code uses an extra data structure (list) to store the common elements, which is against the problem statement's requirement.\n- The code has a time complexity of O(n^2) due to the nested loops, which might not be the most efficient solution.\n\nSuggestions for improvement:\n- Try to find a solution that does not use any extra data structures.\n- Consider sorting the arrays first and then using a two-pointer approach to find common elements, which would improve the time complexity.\n\nAssistant 2:\nStrengths:\n- The answer provides a step-by-step explanation of the algorithm, which is helpful for understanding the logic.\n\nWeaknesses:\n- There is no actual code implementation provided, only a high-level description of the algorithm.\n- The algorithm seems to be incomplete and does not clearly explain how to find the common elements without using any extra data structures.\n\nSuggestions for improvement:\n- Provide a complete code implementation based on the described algorithm.\n- Ensure that the algorithm is complete and can solve the problem without using any extra data structures.", "score": [8.0, 4.0]}
+{"review_id": "LTAjxtE69TyEQkhtyfvcR9", "question_id": 68, "answer1_id": "5P7wPyrNwBrHNdJNrETS5h", "answer2_id": "NDXxGeVFruYsJ3NZ7zjJ3n", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, let's evaluate the problem-solving process of Assistant 1 and Assistant 2:\n\nAssistant 1:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 1\nf(2) = 39\n\nAssistant 2:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 10 - 2 + 3\nf(2) = 10 - 5 + 3\nf(2) = 2\n\nAssistant 1's process is correct, and the final answer is 39. Assistant 2's process is incorrect, as they made a mistake in the first step by not correctly calculating 5(2)^3.\n\nScores:\nAssistant 1: 10\nAssistant 2: 4\n\nAssistant 1 performed better in this problem.", "score": [10, 4]}
+{"review_id": "7AmCkRDfBRoSUgKheYr9n4", "question_id": 69, "answer1_id": "UmnL4WoucBduFpX4jptkNU", "answer2_id": "g3NBgumbbZBxnCMCPj7kof", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n\n3x + 10 = 5x - 10 (use the distributive property)\n\n10 + 10 = 5x - 3x (subtract 3x from both sides)\n\n20 = 2x (simplify)\n\nx = 10 (divide both sides by 2)\n\nTherefore, x = 10.\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2 step-by-step:\n\nAssistant 1:\n3x + 10 = 5(x - 2)\n3x + 10 = 5x - 10 (use the distributive property)\n10 + 10 = 5x - 3x (subtract 3x from both sides)\n20 = 2x (simplify)\nx = 10 (divide both sides by 2)\nTherefore, x = 10.\n\nAssistant 1's steps are correct and lead to the correct answer.\n\nAssistant 2:\nAssistant 2's answer is incorrect from the beginning. The first step of subtracting 5(x - 2) from both sides of the equation is unnecessary and leads to a wrong approach. The rest of the steps are also incorrect and do not lead to a valid solution.\n\nScores:\nAssistant 1: 10\nAssistant 2: 1\n\nAssistant 1 performed better and provided the correct answer with a clear and precise explanation.", "score": [10, 1]}
+{"review_id": "bmPAcsfqXW9zKgijD8qom4", "question_id": 70, "answer1_id": "Hymm673GkY5tXNaTFeNUBZ", "answer2_id": "jwp2XMxq8PTKTJiZCATHnG", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nThe distance formula is:\n\ndistance = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nGiven the endpoints (2, -2) and (10, 4), we can plug in the coordinates:\n\ndistance = sqrt((10 - 2)^2 + (4 - (-2))^2)\ndistance = sqrt(8^2 + 6^2)\ndistance = sqrt(64 + 36)\ndistance = sqrt(100)\ndistance = 10\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1:\nThe problem-solving process is correct, and the final answer is 10 units. The steps are clear and easy to follow.\n\nAssistant 2:\nThe problem-solving process is incorrect. Assistant 2 only subtracted the x-coordinates of the endpoints, which is not the correct way to find the length of a line segment. The correct method is to use the distance formula, as shown in Assistant 1's answer. The final answer provided by Assistant 2 is incorrect (8 units).\n\nScores:\nAssistant 1: 10\nAssistant 2: 1\n\nAssistant 1 performed better because the problem-solving process was correct, and the final answer was accurate. Assistant 2's method was incorrect, and the final answer was wrong.", "score": [10, 1]}
+{"review_id": "cWewYXDkyPMLjDG3ak9QpF", "question_id": 71, "answer1_id": "PUzddJ35E3WsM7BDQ9E59i", "answer2_id": "B6CrSifSJDEHUYiBaiHELR", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's request. They each provided a well-structured and formal email template that the user can use as a guide for proposing a joint venture to a potential business partner. Both responses included appropriate greetings, introductions, explanations of the proposal, and closing statements. The level of detail in both responses was sufficient for the user's needs, and both assistants demonstrated a clear understanding of the user's request.", "score": [9.0, 9.0]}
+{"review_id": "3pDnEqNGj99FiYmdPDmwyu", "question_id": 72, "answer1_id": "6Q72hZCtDkgq379yhdmN5N", "answer2_id": "932DTWDKnBbQDQYTW6be87", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate resignation letter templates that the user can use as a starting point. Both responses included expressions of gratitude and a willingness to help with the transition process. The level of detail in both responses was sufficient for the user's needs, and both assistants maintained a professional and positive tone throughout their answers. It is difficult to differentiate between the two responses in terms of quality, as both are equally useful and well-written.", "score": [9.0, 9.0]}
+{"review_id": "3HxuYW7yfAzfzCuwY2SbDA", "question_id": 73, "answer1_id": "ReXnHy9C8SwcYPAep6gvJg", "answer2_id": "jVEfncaU9LitqggtYBGuik", "reviewer_id": 1, "metadata": {}, "text": "9 6\n\nAssistant 1 provided a well-structured and comprehensive formal letter of recommendation for a student applying to a prestigious graduate program in computer science. The letter included all the necessary elements, such as the sender's and recipient's information, a clear introduction, body, and conclusion, and a strong recommendation for the student. The letter also highlighted the student's academic achievements, extracurricular activities, and personal qualities, making it a strong and convincing recommendation.\n\nAssistant 2, on the other hand, provided a letter that seemed to be more of a recommendation for the student to apply to the program, rather than a recommendation for the student's admission to the program. The letter was less detailed and focused on the student's qualities and achievements, and it did not provide specific examples or evidence to support the recommendation. Additionally, the format of the letter was less formal and did not follow the standard structure of a formal letter of recommendation. Overall, Assistant 1's response was more helpful, relevant, accurate, and detailed than Assistant 2's response.", "score": [9.0, 6.0]}
+{"review_id": "5GQ3UKcnDPPnXV4gm36ppW", "question_id": 74, "answer1_id": "cKk5zZe8yYY4JH3kr5pGXG", "answer2_id": "NQSftH48RoRxTKLCqNbPtt", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided compelling and detailed product launch announcement emails. They both highlighted key features of the software solution, its benefits, and the special introductory offer. The emails were well-structured, engaging, and persuasive. Both assistants demonstrated a high level of helpfulness, relevance, accuracy, and detail in their responses.", "score": [9.0, 9.0]}
+{"review_id": "eSCFcocftJpUgBMUNVN7H6", "question_id": 75, "answer1_id": "c5rwA3cPjytSGcn7H8dZ6Q", "answer2_id": "YeYDnz2kVYAfsz7vCfgLNS", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate responses to the user's question. Assistant 1's answer is slightly better due to the inclusion of more details, such as the reason for the delay (unexpected high volume of orders), the steps taken to resolve the issue (hiring and re-training staff, improving inventory management), and the assurance of preventing future delays. Assistant 2's answer also addressed the issue and provided an apology, but it lacked the specific details that Assistant 1 provided. Both answers were well-written and appropriate for the situation, but Assistant 1's response was more comprehensive and informative.", "score": [9.0, 8.5]}
+{"review_id": "F4brtQMAnC6sfyrbzMEKco", "question_id": 76, "answer1_id": "XZGPtBo86KfF9REZ36s2X5", "answer2_id": "o2LgSDUwp4b4y4iCteh3RR", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided well-structured and engaging scripts for a YouTube video exploring the history and cultural significance of jazz. They both covered the origins of jazz, its evolution, and its impact on culture. The scripts included appropriate visuals and narration, making them suitable for a video format. It is difficult to choose one over the other, as both responses are of high quality and provide a similar level of detail. Therefore, they both receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "gittB7dtSvwEnYznUP7q6E", "question_id": 77, "answer1_id": "DRncHCsdGji756efDhacUT", "answer2_id": "CBvZQ8RJG6AqhUBSoUDbqU", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided engaging and detailed travel blog posts about a recent trip to Hawaii. They both highlighted cultural experiences and must-see attractions, making their answers relevant and helpful. The accuracy and level of detail in both responses were impressive, with each assistant describing different aspects of Hawaiian culture, attractions, and natural beauty. It is difficult to choose one over the other, as both answers were well-written and informative. Therefore, both Assistant 1 and Assistant 2 receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "iLS7cQ9XAeVtbUSENRB2Re", "question_id": 78, "answer1_id": "Y5rCQHHDA6WNfhRcB6QboG", "answer2_id": "VfeFmV9H6SEuotYqEf5ri5", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided captivating movie reviews for recently released science fiction films. Assistant 1's answer was more detailed and comprehensive, discussing the plot, characters, and special effects in a more engaging manner. The review also provided a better sense of the film's atmosphere and the challenges faced by the characters. Assistant 2's answer was also well-written and informative, but it was slightly less detailed and engaging compared to Assistant 1's answer. Both reviews were relevant and accurate, but Assistant 1's answer had a slight edge in terms of helpfulness and level of detail.", "score": [9.0, 8.0]}
+{"review_id": "Keb3b6DPTqmEr99JMZxf9j", "question_id": 79, "answer1_id": "Lea4wh5n6GsEkBnKsntN64", "answer2_id": "ZLYhNQxJXycBZL56RbdGh3", "reviewer_id": 1, "metadata": {}, "text": "9 10\nBoth Assistant 1 and Assistant 2 provided relevant and detailed responses to the question. Assistant 1's answer was structured in a clear and organized manner, providing a comprehensive outline for a podcast script. Assistant 2's answer went a step further by incorporating elements of a real podcast script, such as opening and closing music, interviews with artists and industry insiders, and a more conversational tone. This made Assistant 2's response slightly more engaging and practical for a podcast script, earning it a slightly higher score.", "score": [9.0, 10.0]}
+{"review_id": "k6N3jTNeiiZNsmT6pfdyxr", "question_id": 80, "answer1_id": "gdLxzcypTeuD6ToC6HWnXh", "answer2_id": "kZw2ii8HQtrQp4d2bK5cHy", "reviewer_id": 1, "metadata": {}, "text": "9 8\nBoth Assistant 1 and Assistant 2 provided relevant and detailed symphony concert reviews. Assistant 1's answer was slightly more detailed and engaging, with a vivid description of the orchestra's performance and the audience's reaction. Assistant 2's answer was also well-written and relevant, but it lacked the same level of detail and immersion that Assistant 1's answer provided. Both answers were helpful and accurate, but Assistant 1's answer was more captivating and provided a richer overall experience.", "score": [9.0, 8.0]}
diff --git a/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_llama_vicuna-7b.jsonl b/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_llama_vicuna-7b.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e5964812ffac3d1b93808cf8fa52b44b46c056d5
--- /dev/null
+++ b/model/fastchat/eval/table/review/vicuna-7b_20230322-fp16/review_llama_vicuna-7b.jsonl
@@ -0,0 +1,80 @@
+{"review_id": "5i9XFoVFJDwMgvbcsTqWbQ", "question_id": 1, "answer1_id": "J3UA6eGXGyFeUGqGpP3g34", "answer2_id": "8haRLA69xGcHc5woRHTj2f", "reviewer_id": 1, "metadata": {}, "text": "9 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about improving time management skills. Both answers included a list of strategies and tips, with some overlap between the two. Assistant 1's answer was more focused on specific actions and habits, while Assistant 2's answer included a brief introduction to time management and a more general approach to the strategies. Both answers were detailed and well-organized, making it easy for the user to understand and apply the information. Therefore, both assistants receive a score of 9.", "score": [9.0, 9.0]}
+{"review_id": "7nq3WwrqScHTUEs6gjJEh9", "question_id": 2, "answer1_id": "jZVDNTtFnNNd6vgkGjzAGa", "answer2_id": "j7DV4tZTXhmRFj9zL5TSwq", "reviewer_id": 1, "metadata": {}, "text": "9 8.5\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question about effective ways to deal with stress. Assistant 1 provided a more detailed response with 10 suggestions, while Assistant 2 provided 8 suggestions. Both assistants covered similar points, such as exercise, relaxation techniques, time management, social support, healthy diet, and sleep. Assistant 1 also mentioned prioritizing responsibilities, improving social networks, developing a support network, and getting professional help, which added more depth to the answer. Assistant 2's answer was concise and well-organized, but Assistant 1's answer provided a slightly higher level of detail, which is why Assistant 1 received a higher score.", "score": [9.0, 8.5]}
+{"review_id": "Ec5BTCrpi53kQypqZRUqDD", "question_id": 3, "answer1_id": "NbSxRXf8Yuvjr7HC6AvrPw", "answer2_id": "fEq2fYvNcpaENuQFRMFdan", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's answer contains a lot of repetitive and incorrect information, making it confusing and less helpful. On the other hand, Assistant 2's answer is concise, accurate, and provides a clear comparison between Python and JavaScript, making it more helpful and relevant to the user's question.", "score": [4.0, 8.0]}
+{"review_id": "HQcZ2gk94iJPpHg3DFT5BQ", "question_id": 4, "answer1_id": "E9zrPDdncYvAebEaxiPXfx", "answer2_id": "VYKmdXnESAKV3FRutDUPWU", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about increasing productivity while working from home. Assistant 1's answer was well-structured and provided six clear tips, while Assistant 2's answer was more detailed and provided ten tips. Both answers covered similar points, such as setting up a proper workspace, avoiding multitasking, taking breaks, and setting goals. However, Assistant 2's answer included additional tips, such as using noise-cancelling headphones, time-management tools, staying connected with colleagues, staying motivated, staying physically active, and setting boundaries. These additional tips make Assistant 2's answer more comprehensive and helpful, which is why it receives a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "3mAEiKpkyfmW4bUxKGV3d9", "question_id": 5, "answer1_id": "36TYpADDHAfRvtvKitfSXR", "answer2_id": "maL9a3rivWyWZk3UgwQTVR", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided relevant and accurate information about the basics of quantum computing. However, Assistant 2's answer was more detailed and comprehensive, covering the principles of superposition and entanglement, as well as mentioning the current state of quantum computing and its potential applications. Assistant 1's answer was more concise but lacked the depth and elaboration provided by Assistant 2.", "score": [8.0, 10.0]}
+{"review_id": "jcoiJpcxDqCSWAedtb93Sk", "question_id": 6, "answer1_id": "8vtLLYRM352mZ8PXXbwzBT", "answer2_id": "aGRf8RjpUgneLvw4Uf93do", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and accurate answer, but it lacked some details and did not mention the environmental and nutritional aspects of the differences between plant-based and animal-based protein sources. Assistant 2, on the other hand, provided a more comprehensive answer, covering the differences in nutrient composition, digestibility, and environmental impact. The level of detail and the examples provided by Assistant 2 made their response more helpful and informative.", "score": [7.0, 9.0]}
+{"review_id": "g9Njd865dM6JuEgwQWng3a", "question_id": 7, "answer1_id": "mrCniyWYvAtxWhgMdXq2BG", "answer2_id": "oXtzronC4mdVKH9J59ofij", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about developing critical thinking skills. Assistant 1's answer was concise and offered four strategies, while Assistant 2's answer was more detailed and provided eight strategies. Assistant 2's response was more comprehensive, covering a wider range of techniques and offering a more in-depth explanation of each strategy. This is why Assistant 2 receives a slightly higher score. Both answers were accurate and relevant, but Assistant 2's answer provided a more complete guide for someone looking to improve their critical thinking skills.", "score": [8.0, 9.0]}
+{"review_id": "5DCpRQ22UUyGodUnk5b36n", "question_id": 8, "answer1_id": "S8fmdbvpvbgTUyDU9V3M3N", "answer2_id": "dE5c99j9hW9qDvjjPxUPzc", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a relevant and accurate answer, but it was limited to only one major challenge faced by the education sector, which is the lack of skilled teachers. On the other hand, Assistant 2 provided a more comprehensive and detailed answer, covering a wider range of challenges faced by the education sector, such as access and equity, teacher shortages and quality, technology and innovation, curriculum and standards, financing and funding, student engagement and motivation, teacher and student evaluation, and international comparisons and rankings. This makes Assistant 2's answer more helpful and informative for the user.", "score": [8.0, 10.0]}
+{"review_id": "N8cmCoKXRbiLfHHYadBewx", "question_id": 9, "answer1_id": "KmuNjvNKRyaFwaBZTLArcG", "answer2_id": "oLRzkYUv8ooSJJLqfPnrxd", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question about the primary factors influencing consumer behavior. Assistant 1's answer was well-organized and covered a wide range of factors, including psychological, social, cultural, demographic, economic, personal, situational, and market factors. The answer was accurate and provided a good level of detail.\n\nAssistant 2's answer was also accurate and relevant, covering personal, psychological, social, economic, technological, environmental, marketing, health, legal, and public opinion factors. The answer provided a slightly more comprehensive list of factors, including technological, environmental, health, legal, and public opinion factors that were not mentioned by Assistant 1. Additionally, Assistant 2's answer was well-organized and provided a clear explanation of each factor. Therefore, Assistant 2 receives a slightly higher score due to the more comprehensive list of factors and clear explanations.", "score": [8.0, 9.0]}
+{"review_id": "2YxYqwKkjnyJMxwtnQFxGY", "question_id": 10, "answer1_id": "HNNT9k5htiMeRwNSD6dcz4", "answer2_id": "hi7Gu2XPwcThie58TvvkK8", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a list of conflict resolution strategies but did not go into detail on how to implement them effectively. Assistant 2, on the other hand, provided a more comprehensive list of strategies and included details on how to apply them in the workplace. Assistant 2's answer also focused more on fostering a positive work environment and open communication, which are essential aspects of conflict resolution. Therefore, Assistant 2's answer is more helpful, relevant, and detailed, earning a higher score.", "score": [7.0, 9.0]}
+{"review_id": "3kaigfGHSEvDPEJgJmu8zr", "question_id": 11, "answer1_id": "ChXjhDDikxU9FV3CADs6Ym", "answer2_id": "Xx5PB6u9sBagzxtB2YUKq8", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the environmental impact and human health risks associated with single-use plastic bottles, providing a clear and concise response. However, Assistant 2 provided a more detailed answer, discussing not only the environmental and human health implications but also the economic and social impacts of using single-use plastic bottles versus reusable bottles. This additional information makes Assistant 2's response more comprehensive and informative, earning it a slightly higher score.", "score": [8.0, 9.0]}
+{"review_id": "DhmA5fZbbF3Fy74Gx7Jy2x", "question_id": 12, "answer1_id": "5wsPnN3VmmSkahgugFNo7u", "answer2_id": "FfaUTMS95MuGQQRDefvVzj", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the needs of people with disabilities and provided examples of accessible stops, routes, and vehicles, as well as the importance of reliable information. Assistant 2 provided a more comprehensive list of factors to consider, including physical accessibility, communication accessibility, route and schedule accessibility, service animals and assistive devices, dissemination of information, training and education, and continuous improvement. Assistant 2's answer was more detailed and covered a wider range of factors, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "KEcC2DvbyNeo7hUYQPvkvc", "question_id": 13, "answer1_id": "NRGZGnU2sPN3ShMe9C3fMn", "answer2_id": "WgCpMqMPUb9TU8jCuiExg3", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer starts by explaining the two types of fiscal policy but then proceeds to mix fiscal and monetary policies in the examples provided, which creates confusion. The answer could have been more organized and accurate. Assistant 2's answer, on the other hand, provides a clear and organized response, accurately differentiating between fiscal and monetary policies and offering relevant examples for each. Assistant 2's answer also includes additional points, such as targeted support and international cooperation, which makes it more comprehensive and detailed.", "score": [7.0, 9.0]}
+{"review_id": "42LQAykxbQW4rJGoKMJ9RK", "question_id": 14, "answer1_id": "inKimHkWsXShQBTRmxr5Yg", "answer2_id": "ATkPcXKbAki2VCoopjq6c3", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and general answer, touching on the difficulties of language barriers and different cultural values. While the answer was relevant and accurate, it lacked detail and examples. Assistant 2, on the other hand, provided a more comprehensive answer with specific examples and a wider range of factors that affect communication and relationships in multicultural societies. The answer was well-structured, detailed, and addressed various aspects such as stereotypes, prejudice, discrimination, and power dynamics. Therefore, Assistant 2 receives a higher score for their more in-depth and informative response.", "score": [7.0, 9.0]}
+{"review_id": "kdbzLpXnpPQ3r6br9GypYR", "question_id": 15, "answer1_id": "H8aKtWwf8m6Lgxc2YyR2yf", "answer2_id": "TFh5bXFdG4fdK5hmq6qS6o", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1's answer was more concise and listed various applications of AI in healthcare, but it lacked a specific scenario and details. Assistant 2's answer, on the other hand, provided a more detailed scenario involving disease diagnosis and treatment planning, as well as mentioning predictive analytics and automating tasks. This made Assistant 2's answer more comprehensive and informative, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "bfP6NbhL4hfd5JVHQaZHiL", "question_id": 16, "answer1_id": "PafVwxMsjSkYUETiVBgxTU", "answer2_id": "XDV7jFB36qKGzPXPcwvbQy", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a brief overview of the CRISPR-Cas9 technology and touched on its potential applications and ethical implications. However, Assistant 2 provided a more detailed and structured response, listing specific applications and ethical concerns, which made the answer more informative and comprehensive. Therefore, Assistant 2 receives a higher score.", "score": [8.0, 9.0]}
+{"review_id": "fDkTDh7L7XchdcoweEaCvy", "question_id": 17, "answer1_id": "dmDUAfTP4aERJqqSeDBybu", "answer2_id": "6E3YAfxqckwL83dVo6ZRP4", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 gave a clear explanation of how vaccinations work and what herd immunity is. However, Assistant 2's answer was more detailed and comprehensive, explaining the role of antibodies, the concept of herd immunity thresholds, and the time it takes for the immune system to develop sufficient antibodies. This additional information makes Assistant 2's answer slightly better in terms of level of detail and overall performance.", "score": [8.0, 9.0]}
+{"review_id": "cYJRhHbbxvE5n4A2K2dUaS", "question_id": 18, "answer1_id": "8KGSSqbLqVdSZMEN9oCv5R", "answer2_id": "FjSXpLx6FfHU8zN9mb8ucX", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1 focused on the negative aspects of social media platforms and their influence on news consumption, while Assistant 2 provided a more balanced view, discussing both positive and negative implications. Assistant 2 also mentioned some measures taken by social media platforms to combat misinformation, which added to the level of detail in their response. Overall, Assistant 2's answer was slightly more comprehensive and balanced, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "HY68RS4gmj92SqSJJX8Z49", "question_id": 19, "answer1_id": "HbnJXJpPfaM2iX3ek4Epvy", "answer2_id": "HJczP2JdM4s6cxZyBWVyNZ", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's answer started by listing various interventions to increase the intake of healthier foods but failed to address how cultural, social, and economic factors influence people's food choices. The answer was repetitive and did not provide a clear connection to the question. On the other hand, Assistant 2's answer directly addressed the question by discussing the impact of cultural, social, and economic factors on food choices and provided examples of how this knowledge can be used to promote healthier diets. The answer was relevant, accurate, and detailed, making it more helpful and informative.", "score": [4.0, 8.0]}
+{"review_id": "6WddacqH4zVXD9su3LZLgf", "question_id": 20, "answer1_id": "mx8Abfz5PtDcn6jgCA8zhM", "answer2_id": "X2EFE34dc5vtFSTGr3n2Bg", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer started off well by explaining the concept of natural selection but then drifted off into discussing population size and genetic drift, which are related but not the main focus of the question. The answer also lacked a clear explanation of how natural selection contributes to the evolution and adaptation of species. Assistant 2's answer, on the other hand, provided a clear and concise explanation of natural selection and its role in evolution and adaptation. The answer included a step-by-step explanation of the process, which made it easier to understand and more relevant to the question.", "score": [7.0, 9.0]}
+{"review_id": "nPTHzTY3KHwxAKXcXEfNAT", "question_id": 21, "answer1_id": "NuS9PUGkJG2pHscArvfyeF", "answer2_id": "iJjKWygtpHaLJirgK2PACK", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is relevant and accurate, but it lacks the level of detail and context that Assistant 2's answer provides. Assistant 2 not only mentions the respectful greeting and bow but also elaborates on the importance of appearance and adherence to the codes of chivalry. This makes Assistant 2's response more helpful and informative for someone looking to understand how a medieval knight would introduce themselves at a royal banquet.", "score": [7.0, 9.0]}
+{"review_id": "LzLzWC4WP2bRquLAn3KA4v", "question_id": 22, "answer1_id": "SPjzirzbzo3UJ8BHXSgY9y", "answer2_id": "FRZfkefyPRGCSpLEexQoRH", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and helpful response, but it lacked the level of detail and enthusiasm that Assistant 2's answer had. Assistant 2's answer was more engaging, provided a clear motivational speech, and painted a vivid picture of the adventure and rewards that the crew could expect. This made Assistant 2's response more effective in motivating the crew to search for hidden treasure.", "score": [7.0, 9.0]}
+{"review_id": "f7UkLUJvX32LVEsLiSz9tD", "question_id": 23, "answer1_id": "JkeeC4MZVwybPU8DSbusnT", "answer2_id": "HBah6W9KuR8eNpRQJUxVvd", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a good explanation of what a soliloquy is and gave an example from Romeo and Juliet, which was relevant to the question. However, the answer did not include a personalized soliloquy as the question requested. Assistant 2, on the other hand, provided a well-written and relevant soliloquy that directly addressed the question. The soliloquy was creative, expressive, and in the style of Shakespearean language, which made it a more complete and satisfying answer to the question.", "score": [8.0, 10.0]}
+{"review_id": "b8ZetYDhsQmF5bYn3YJvQN", "question_id": 24, "answer1_id": "UcfKSXuzBPkZzLHsH4EwQz", "answer2_id": "3Rgw9vMLyMiwazfdjhWcgT", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's answer was not relevant to the question, as it focused on their own origin as an AI rather than providing a superhero origin story for a curious child. Assistant 2, on the other hand, provided a relevant and engaging superhero origin story that would be appropriate for a child. The answer included details about the superhero's powers and their dedication to using them for good, which makes it a more suitable response to the question.", "score": [4.0, 8.0]}
+{"review_id": "NTqffFAzRnFRkcdekorc8G", "question_id": 25, "answer1_id": "cEGdo69FwL2Y3teYkMk9Gp", "answer2_id": "b4oghpgzhWMdoryzQrSwuF", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer was more concise and focused on five main advancements, while Assistant 2's answer covered a broader range of ten advancements. Assistant 2's answer provided a slightly more detailed and comprehensive overview of the technological advancements, which is why I gave it a higher score. Both answers were well-structured and informative, but Assistant 2's answer was more extensive and covered more areas of technology.", "score": [8.0, 9.0]}
+{"review_id": "EjhiZnnwZG92KRCNdrtHr2", "question_id": 26, "answer1_id": "W28UtVDZTcWwa8TmeruRn4", "answer2_id": "GzzDrjBAe3BnXWgWrATxJL", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's answer was confusing and hard to follow, with multiple fumbles and changes in possession that made it difficult to understand the winning play. The answer also lacked the excitement and emotion expected from a sports commentator. Assistant 2's answer, on the other hand, was more engaging and provided a clearer description of the winning play. It captured the excitement and tension of the final moments of the game, and the teamwork and determination of the players. However, both answers could have been more precise in describing the specific teams and players involved in the play.", "score": [6.0, 8.0]}
+{"review_id": "kpM82CmswmZuQWr7LbQXX5", "question_id": 27, "answer1_id": "j65UyAfRdAcWBAEinXJzBK", "answer2_id": "f7hUYhajUbXNs3gQrG9z3b", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is relevant and provides a brief description of the signature dish, but it lacks the depth and detail that would be expected from a world-famous chef. The answer could have been more elaborate in terms of flavors, presentation, and techniques used in the dish. Assistant 2's answer, on the other hand, is more detailed and paints a vivid picture of the dish, its flavors, and the techniques used. It also emphasizes the use of locally-sourced, seasonal ingredients and the balance of flavors, which adds to the overall quality of the response.", "score": [7.0, 9.0]}
+{"review_id": "h2MxJiALvLxrDJcznhQGvv", "question_id": 28, "answer1_id": "VWuywPvFPK42ZxmHpmQ27y", "answer2_id": "LpUrrJuQ4cA6LtNRmiTfRv", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provided a decent description of the emotions and the view from the top of Mount Everest, but it lacked some details and accuracy, such as mentioning green trees and bushes, which would not be present at that altitude. Assistant 2's answer was more detailed and accurate, providing a better description of the surrounding peaks, the landscape, and the emotions a climber might experience. The answer also acknowledged the dangers and challenges associated with climbing Mount Everest, making it more comprehensive and informative.", "score": [7.0, 9.0]}
+{"review_id": "52pHrcutWNVFE5tfeVtsEC", "question_id": 29, "answer1_id": "AUWSLTAvGV5sP9qLsVyzjT", "answer2_id": "Yiup49xrP6jf9nsEuGHgdN", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful, relevant, and accurate answers to the question. Assistant 1's answer focused more on the environmental conditions of Mars, such as temperature, gravity, and the atmosphere, while also mentioning the challenges of isolation, boredom, and stress. Assistant 2's answer, on the other hand, provided a more structured list of challenges, including temperature fluctuations, radiation exposure, lack of oxygen, water scarcity, psychological challenges, and physical challenges. Both answers touched on the importance of establishing a sustainable community on Mars. Assistant 2's answer was slightly more comprehensive and organized, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "9ec9AaZaT5K9fKbM59Uu26", "question_id": 30, "answer1_id": "GZtEdKLxmUUbenH4XRyLkE", "answer2_id": "7cWm5Kop6bLzwLgJjpAVrK", "reviewer_id": 1, "metadata": {}, "text": "4 9\nAssistant 1's response was not helpful, as it merely provided a list of ways to describe allies and survival in a post-apocalyptic world without actually providing any descriptions or examples. The answer was repetitive and lacked detail. On the other hand, Assistant 2's response was much more relevant and detailed, providing a clear picture of how the character would survive in a post-apocalyptic world and the allies they would encounter. The answer was well-structured and provided examples of the character's skills, knowledge, and relationships with other survivors, as well as their interaction with the natural world.", "score": [4.0, 9.0]}
+{"review_id": "geEwZK2e2MURYg64LLgJDb", "question_id": 31, "answer1_id": "kba2Xpb75rUfnskZSzrBas", "answer2_id": "YaUHhigGUvgv82Js3ktFgs", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is less detailed and less accurate than Assistant 2's answer. Assistant 1 incorrectly states that a restaurant popular among locals will be less crowded, while in reality, a popular local restaurant is likely to be crowded. Assistant 2 provides a more comprehensive and accurate response, listing several indicators to determine if a restaurant is popular among locals or tourists, and explains why this information might be useful. Assistant 2's answer is more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "iiTyDMyXMyxSXc8UNiP8n7", "question_id": 32, "answer1_id": "RCaptsMBYXseVJgjJyyvUh", "answer2_id": "LaHQYWhmXF7mnPSVFdhCeq", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1's answer was clear and concise, listing six subtle clues that suggest someone is pretending to understand a topic. However, Assistant 2's answer was more detailed and provided additional clues, such as using big words or jargon, dominating the conversation, and avoiding eye contact. Assistant 2 also mentioned that some people might use these tactics unintentionally and advised approaching conversations with an open mind. This additional information and advice make Assistant 2's answer slightly better than Assistant 1's.", "score": [8.0, 9.0]}
+{"review_id": "3K5uApi5ZY9VysLGzgyu8H", "question_id": 33, "answer1_id": "2CeidEr7Cjj6YgcuaiPgnN", "answer2_id": "Br2uFCYmRUaQULwKzpffz9", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a relevant answer, but it focused mainly on the general idea of using a paper map or asking for directions. Assistant 2, on the other hand, provided a more detailed and comprehensive answer, listing several reasons why someone might choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app. These reasons included privacy concerns, detailed information, offline functionality, human interaction, and battery life. Assistant 2's answer was more helpful, accurate, and relevant to the user's question, which is why it received a higher score.", "score": [8.0, 10.0]}
+{"review_id": "HHa3UPwHtxUw3zbksET5fr", "question_id": 34, "answer1_id": "kpStLLNWNkGHyRFdKsmDds", "answer2_id": "FCRqJu6DgRvCNq4Z2NneHf", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is helpful and relevant but lacks details and examples of how to determine if a person is genuinely interested in a conversation. The answer only mentions one example of a person's actions, which is looking at their phone or engaging in other activities. On the other hand, Assistant 2's answer provides a more comprehensive list of clues to look for, such as active listening, engagement, inclusivity, respect and empathy, and authenticity. This answer also acknowledges the complexity of human behavior and the possibility of a person being both polite and genuinely interested in a conversation. Overall, Assistant 2's answer is more detailed, accurate, and helpful in addressing the user's question.", "score": [7.0, 9.0]}
+{"review_id": "AkQKABDspbUeqF97KqoJnm", "question_id": 35, "answer1_id": "PFtMQp4X4MrMYkEa8c8CKG", "answer2_id": "Fy5Nw8LcWpdq2GokTbiwuq", "reviewer_id": 1, "metadata": {}, "text": "8 10\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the support for the local community, flexibility, and eco-friendliness of small businesses. However, Assistant 2 provided a more comprehensive and detailed answer, listing eight possible reasons for preferring small, locally-owned businesses over large chain stores. These reasons included personal connection, local involvement, personalized service, quality of products, unique products, sense of place, sustainability, and experience. Assistant 2's answer covered a wider range of factors that might influence a customer's decision, which is why it receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "oX5Y34wVE8iQUHiaArecxz", "question_id": 36, "answer1_id": "4JwjhR5QVsdYgQupSZrfUp", "answer2_id": "hKhcnEtSjzysU7sbtE3JeH", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1's answer was accurate and provided a good starting point for assessing the credibility of a source. However, Assistant 2's answer was more detailed and offered a wider range of tips for evaluating the credibility of a source, including checking the author's credentials, looking for secondary sources, considering the publication's purpose and bias, and using alternative sources. This additional information makes Assistant 2's answer more comprehensive and useful for the user.", "score": [8.0, 9.0]}
+{"review_id": "6Gk4mx6t7esRfXVcmvjc3m", "question_id": 37, "answer1_id": "ednPMy4dvW9CgKg2PJsBqW", "answer2_id": "cAVZTw5QY8WUnJEd3rUu3p", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is brief and provides a general idea of why people enjoy or avoid the sensation of being scared. However, it lacks details and explanations about the factors that influence these preferences. Assistant 2's answer, on the other hand, is more comprehensive and informative, discussing personality traits, past experiences, coping mechanisms, and the release of endorphins. This answer also acknowledges the complexity and individuality of the experience, making it more helpful and relevant to the user's question.", "score": [7.0, 9.0]}
+{"review_id": "NodnduDxAzpwvjVi3CYPxd", "question_id": 38, "answer1_id": "ehPApSqCniyGN7hd332ToW", "answer2_id": "9JxYGUzSa2rR68BTVuZJEA", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a relevant and accurate answer, but it was quite brief and lacked specific examples or details. Assistant 2, on the other hand, provided a more comprehensive answer with clear examples and details, covering various aspects of social situations and cultural norms. The response from Assistant 2 was more helpful and informative, which is why it received a higher score.", "score": [8.0, 10.0]}
+{"review_id": "cL5QHiCqv9PvegPYLcNLXQ", "question_id": 39, "answer1_id": "bLvdcqSwgSCrUPZ8eQLLqb", "answer2_id": "5b3qKj2jtzgcZX7SSipQBW", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a relevant and accurate response, but it was less detailed and comprehensive compared to Assistant 2's answer. Assistant 2 presented a more balanced view of the debate, discussing the arguments for and against space exploration and the moral implications of each side. Additionally, Assistant 2's response was more structured and provided a clearer conclusion, making it easier to understand the complexity of the issue.", "score": [7.0, 9.0]}
+{"review_id": "n8hSXAU9A2ATfJcLpdYsbV", "question_id": 40, "answer1_id": "8tx5GEHjSRKZRaZwHvLoM8", "answer2_id": "hG6i3Q8F7bA7u73vo6t2XZ", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided helpful and relevant answers to the question. Assistant 1 focused on the complexity of the relationship between job creation and technological progress, emphasizing the need for balance and considering various factors. Assistant 2 also emphasized the importance of balance but provided more concrete examples of how automation can create new jobs and improve society. Assistant 2's answer was slightly more detailed and offered a clearer perspective on the potential benefits of automation, which is why it received a higher score.", "score": [8.0, 9.0]}
+{"review_id": "7C29a5tM32oHhEQCuqCGZt", "question_id": 41, "answer1_id": "J6TzFQAyiHs6Arijqwxtrm", "answer2_id": "KaZUZ4NAnQ3SY6mq2QY3Kt", "reviewer_id": 1, "metadata": {}, "text": "7 5\nAssistant 1 provided a more accurate and detailed response, considering the average life expectancy and giving a range of blinks per day (20,000 to 30,000) to estimate the total number of blinks in a lifetime. The calculations were clear and easy to follow. However, the answer could be improved by providing a more precise estimate and considering the global average life expectancy rather than just the United States.\n\nAssistant 2's response was less accurate and contained some calculation errors. The initial calculation of the number of seconds in a human lifetime was incorrect, as it should have been multiplied by the number of seconds in a day, not divided. Additionally, the method of calculating the total number of blinks in a lifetime was convoluted and confusing, leading to an incorrect final result. The answer could be improved by correcting the calculations and providing a clearer explanation.", "score": [7.0, 5.0]}
+{"review_id": "JvM4CKLAWgQqNizs7nm7m7", "question_id": 42, "answer1_id": "f7xAtd3wwc4vP7WSnHffAN", "answer2_id": "Jwrdb34BnrswtwbeSd6xxP", "reviewer_id": 1, "metadata": {}, "text": "6 4\nAssistant 1's answer is more relevant and accurate in terms of addressing the question about the number of atoms in a grain of salt. However, it lacks a detailed explanation of how the numbers were derived. Assistant 2's answer, on the other hand, is not relevant to the question, as it focuses on the weight of a grain of salt rather than the number of atoms. The level of detail in Assistant 2's answer is higher, but it does not address the user's question. Therefore, Assistant 1 receives a score of 6, while Assistant 2 receives a score of 4.", "score": [6.0, 4.0]}
+{"review_id": "9qeRpLBoa6LRtcCCUQHtuA", "question_id": 43, "answer1_id": "eRMyp4AbfgBqtbVZzHupFN", "answer2_id": "Ubz2V2EhkWmNYPEErdX8gn", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided an answer that was accurate but lacked a detailed explanation of how the number of lightning strikes was determined. The answer also contained an error, stating that there are roughly 50 million strikes per day, which contradicts the information provided earlier in the response. Assistant 2, on the other hand, provided a more detailed and accurate explanation, discussing the factors that influence the number of lightning strikes, the methods used to determine the number of strikes, and the importance of lightning in the Earth's ecosystem. Assistant 2's answer was more helpful, relevant, and detailed, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "LyaeedUdytWvA4vJnU3FvL", "question_id": 44, "answer1_id": "kjKb6YG86FfAh22qCePV4V", "answer2_id": "CufihrnQT3GRic3Vr2rfU2", "reviewer_id": 1, "metadata": {}, "text": "4 7\nAssistant 1's answer contains incorrect calculations and assumptions, leading to an inaccurate conclusion. The calculation of the volume of a balloon and the weight of helium is incorrect, and the final conclusion does not make sense. Assistant 2's answer, while not perfect, provides a more detailed and step-by-step explanation of the reasoning and calculations. The answer is more accurate and relevant to the question, although it still contains some inaccuracies and assumptions. Overall, Assistant 2's answer is more helpful and informative than Assistant 1's answer.", "score": [4.0, 7.0]}
+{"review_id": "JMwszXCUAW3oBwaE6F75BF", "question_id": 45, "answer1_id": "ULtkGu6m6tB4tawFStVdJM", "answer2_id": "fBBFkmgfGSiPtQtBdsVxpZ", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a specific number of text messages sent per minute (16.3 million) and cited a 2016 study, but the answer lacks a clear step-by-step explanation of how this number was derived. Assistant 2, on the other hand, acknowledged the difficulty in determining the exact number and provided a more detailed explanation of the factors that contribute to the high number of text messages sent worldwide. Although Assistant 2 did not provide a specific number for text messages sent per minute, the answer was more comprehensive and informative, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "N5TsbDUjFGKsfqj9yYarMu", "question_id": 46, "answer1_id": "C6hkCnJZmfCGvnr5an2jFh", "answer2_id": "eQmVTyJQmoBKrZ9J38EXBC", "reviewer_id": 1, "metadata": {}, "text": "4 7\nAssistant 1's answer is confusing and contains inaccuracies, such as stating that the total number of words in the English language is approximately 1,025,109,500, which is incorrect. The answer also focuses solely on the English language, which is not representative of the entire world's population. Assistant 2's answer, on the other hand, provides a more reasonable approach to the question by considering the number of languages spoken on Earth, the world's population, and an estimated average number of words spoken per day. Although Assistant 2's answer does not provide an exact number, it acknowledges the difficulty in providing a precise estimate and offers a rough calculation based on reasonable assumptions.", "score": [4.0, 7.0]}
+{"review_id": "UF4wqSNSTikYMhMh48RQbd", "question_id": 47, "answer1_id": "DApBTLuiJwSsfQHTioH9PY", "answer2_id": "4JYtxzTN9oUTkVrFy7CDe6", "reviewer_id": 1, "metadata": {}, "text": "4 8\nAssistant 1's answer is not helpful, as it provides an incorrect and unrealistic estimation of the number of snowflakes that fall during a typical winter. The answer also lacks a clear explanation of the reasoning behind the estimation. On the other hand, Assistant 2's answer is more helpful, as it explains the factors that affect the formation and number of snowflakes, such as temperature, humidity, and the presence of particles in the air. Although Assistant 2 does not provide a specific number, the answer acknowledges the difficulty in estimating the number of snowflakes and provides a general range of snowfall per square meter per hour. The answer is relevant, accurate, and detailed, making it more informative and useful for the reader.", "score": [4.0, 8.0]}
+{"review_id": "hbP6McDx8B7PhdeReV4Zbu", "question_id": 48, "answer1_id": "gmy58biNVuneSpFZTcpXtT", "answer2_id": "M7KhQtwQrueKKwuemeJHUb", "reviewer_id": 1, "metadata": {}, "text": "6 8\nAssistant 1's answer starts with an estimate of the number of published books but does not provide any further calculations or reasoning to estimate the total number of pages. Assistant 2's answer, on the other hand, provides a more detailed explanation, using the estimated number of books published each year and an average length of 250 pages per book to arrive at an approximate number of pages. While both answers acknowledge the difficulty in determining an exact number, Assistant 2's answer is more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [6.0, 8.0]}
+{"review_id": "BrXYnG65tBH2y55LXe8eaJ", "question_id": 49, "answer1_id": "Cpi6LBB86W73APEA8naNuj", "answer2_id": "AuuvYW4edaZy5wpL5QyppA", "reviewer_id": 1, "metadata": {}, "text": "4 7\nAssistant 1's answer is confusing and contains irrelevant calculations, making it difficult to follow and understand. The final answer of 4.7 billion orbits is also incorrect. Assistant 2's answer is clearer and more relevant, although it still contains some inaccuracies. The final answer of 200,000 orbits is closer to the correct answer, but it is still incorrect. The explanation provided by Assistant 2 is more detailed and easier to follow, which is why it receives a higher score.", "score": [4.0, 7.0]}
+{"review_id": "LjeASAY8Bc4PEvFhiZoZxa", "question_id": 50, "answer1_id": "3g83CoAFWrDD6V6LyEpoMV", "answer2_id": "TJpkA2j3kC9akcHtmVQKUF", "reviewer_id": 1, "metadata": {}, "text": "3 8\nAssistant 1's answer is highly inaccurate and seems to be based on arbitrary assumptions, resulting in an implausible number of 36,000 songs recorded throughout history. The reasoning provided is not logical and does not take into account the vast history of music and the exponential growth of recorded music in the 20th and 21st centuries.\n\nOn the other hand, Assistant 2's answer acknowledges the difficulty in determining the exact number of songs recorded throughout history and provides a more reasonable estimation of tens of thousands of songs in existence today, with hundreds of thousands more recorded throughout history. The answer also takes into account the evolution of music, the impact of recording technology, and the fact that songs can be re-recorded and covered by other artists. This response is more helpful, relevant, and detailed, which is why it receives a higher score of 8.", "score": [3.0, 8.0]}
+{"review_id": "YSCXdw9ConTy2JrN3saWjS", "question_id": 51, "answer1_id": "8p5A4gouBgCmbQj5gMZapU", "answer2_id": "9FJsZWNGMNuGKGF4YjQoRM", "reviewer_id": 1, "metadata": {}, "text": "7 8\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the question. Assistant 1 focused on the potential spread of the technology, comparing it to the printing press, while Assistant 2 delved deeper into the possible uses and limitations of the Internet during the Renaissance period. Assistant 2's answer was more detailed and provided a better understanding of how the Internet might have been used during that time, which is why it received a higher score. However, both answers were accurate and relevant to the question.", "score": [7.0, 8.0]}
+{"review_id": "aeAafF8CwowE9N6ML35GtP", "question_id": 52, "answer1_id": "LVWqSxH3Uq6675w7LXH6zP", "answer2_id": "4vN4PKLUJkaYTEEVzYE3DN", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provides a brief and speculative response about the potential growth and expansion of the Aztec Empire if they had repelled the Spanish conquistadors. However, it lacks depth and does not consider other potential outcomes or factors that could have influenced the Aztec civilization. Assistant 2's answer, on the other hand, acknowledges the uncertainty of the situation and provides a more balanced view of the potential outcomes, considering both the possibility of the Aztecs thriving and the potential for eventual defeat. Assistant 2 also discusses the impact of European ideas and technologies on the Aztec society, providing a more comprehensive and detailed response. Therefore, Assistant 2 receives a higher score for its more nuanced and informative answer.", "score": [7.0, 9.0]}
+{"review_id": "Aj6YBfogYK8D7WxmHmvvXJ", "question_id": 53, "answer1_id": "ffXksqRU3k8CNHqYfshYhj", "answer2_id": "LjnTtmsFEMgFEkYMKE3VRV", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer focused on the current situation and how humans are better equipped to deal with the plague today, which is not relevant to the question about the hypothetical scenario of the Black Death not occurring in the 14th century. Assistant 2's answer, on the other hand, provided a more relevant and detailed response, discussing the possible scenarios that could have occurred if the Black Death had not happened in the 14th century. Assistant 2's answer was more helpful, accurate, and relevant to the user's question, which is why it received a higher score.", "score": [7.0, 9.0]}
+{"review_id": "d5AH4SEUimw83giYCTKUaN", "question_id": 54, "answer1_id": "4L3dWuqvWwg5kELt7p8SVZ", "answer2_id": "9L9SgmcPjkFs3CwuiUJqm9", "reviewer_id": 1, "metadata": {}, "text": "6 9\nAssistant 1's answer is brief and doesn't provide much insight into the potential impact of Newton focusing on biology. It simply states that he wouldn't have discovered the law of gravity, which is true but not very helpful. Assistant 2's answer, on the other hand, is more detailed and explores the possible outcomes of Newton focusing on biology, such as contributions to medicine, genetics, and ecology. It also acknowledges the uncertainty of the hypothetical scenario and highlights the importance of Newton's work in physics. Therefore, Assistant 2's answer is more helpful, relevant, and detailed, earning a higher score.", "score": [6.0, 9.0]}
+{"review_id": "9fSrpTszmv62245Lyt6ZAF", "question_id": 55, "answer1_id": "XfpqViGW5dvauNZPDSF8yv", "answer2_id": "f9pwVc7bbzajLXKsaGsXQ8", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and accurate answer, mentioning the impact of the Beatles on the music world. However, the answer lacked depth and details. Assistant 2, on the other hand, provided a more comprehensive response, exploring various possibilities and potential impacts on the music scene, the Beatles themselves, the music industry, and cultural and social changes. This answer was more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "6XivLVpPqe6rFoU56gyBsN", "question_id": 56, "answer1_id": "WsuMf9XBGs3dFjfSGKjEzg", "answer2_id": "UfZJcVaZLWkVsipvmDBrdd", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer is concise and provides a general idea of the consequences if Alan Turing had not cracked the Enigma code. However, it lacks depth and detail. Assistant 2's answer, on the other hand, provides a more comprehensive response, discussing the importance of the Enigma code, the potential challenges faced by the Allies, and the possible impact on the duration and outcome of the war. While both answers are relevant and accurate, Assistant 2's answer offers a more detailed and informative response, which is why it receives a higher score.", "score": [7.0, 9.0]}
+{"review_id": "ECG6kwPnVVYw2ycC7amdqj", "question_id": 57, "answer1_id": "5qo3HudLrwrzEV2Px7gYRf", "answer2_id": "K635PkbTDkJTEnDSG68bTh", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and general answer, mentioning the longer and more dangerous route ships would have to take without the Suez Canal. However, the answer lacked depth and detail. Assistant 2, on the other hand, provided a more comprehensive response, discussing the impact on global trade, economic growth, and the potential development of alternative trade routes. Assistant 2 also acknowledged the difficulty in predicting the exact consequences of a world without the Suez Canal, which demonstrates a more nuanced understanding of the topic.", "score": [7.0, 9.0]}
+{"review_id": "LL82DX2sMg5nxfyLXMeb8V", "question_id": 58, "answer1_id": "SRxwJkNCfUaVe4Gb8LPvSK", "answer2_id": "aGEwGV2TQY6snjFdiLkyqZ", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief and general answer, stating that the Maya civilization would have continued to grow, thrive, and develop if it had not collapsed. While this answer is relevant and accurate, it lacks detail and depth. Assistant 2, on the other hand, provided a more comprehensive response, discussing the factors that may have contributed to the collapse, as well as the possible outcomes if the civilization had not collapsed. Assistant 2 also acknowledged the ongoing research and uncertainty surrounding the topic. Therefore, Assistant 2's answer is more helpful, relevant, accurate, and detailed than Assistant 1's answer.", "score": [7.0, 9.0]}
+{"review_id": "3PGSnTk5mNFuiJUo9oWQda", "question_id": 59, "answer1_id": "S54Mmyim3QT5owdhwnNYmB", "answer2_id": "ksnQVXY9SY2NzMxXEmV3Pd", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1 provided a brief background on Christopher Columbus and his discovery of the Americas, but did not directly address the question of what would have happened if he had not discovered the Americas. Assistant 2, on the other hand, directly addressed the question and provided a more detailed analysis of the potential consequences of Columbus not discovering the Americas. Assistant 2's answer was more helpful, relevant, and detailed in addressing the user's question.", "score": [7.0, 9.0]}
+{"review_id": "7hW9CaMFfSHULgQVaJFxxT", "question_id": 60, "answer1_id": "8DJpwEFb5jqt3vMj5oNqVX", "answer2_id": "GZpcvXQyrKUHsgWpzS7Hpo", "reviewer_id": 1, "metadata": {}, "text": "6 9\nAssistant 1's answer provides some background information on van Gogh's life but does not directly address the question of what might have happened if he had been successful during his lifetime. Assistant 2's answer, on the other hand, directly addresses the question and provides a thoughtful analysis of the possible outcomes of van Gogh's success, while also acknowledging the uncertainty of such a hypothetical scenario. Assistant 2's answer is more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [6.0, 9.0]}
+{"review_id": "YvsUg3io7zqtitXv2CGyYr", "question_id": 61, "answer1_id": "KMtqqYkDmyyThkvxXJ4kJd", "answer2_id": "Czx3sfkw8C4md9YJrhs3gu", "reviewer_id": 2, "metadata": {}, "text": "6 8\n\nAssistant 1:\nStrengths:\n- The answer provides a step-by-step approach to solving the problem, which can be helpful for someone who is new to programming.\n\nWeaknesses:\n- The answer does not provide a complete code example.\n- The answer suggests using the strstr function, which is not the most efficient or accurate way to count occurrences of a word in a line.\n\nSuggestions for improvement:\n- Provide a complete code example.\n- Use the find function from the string class instead of strstr for a more accurate and efficient solution.\n\nAssistant 2:\nStrengths:\n- The answer provides a complete code example that is easy to understand.\n- The code uses the find function from the string class, which is a more accurate and efficient way to count occurrences of a word in a line.\n\nWeaknesses:\n- The code does not ask the user for the word to search for, and the word variable is not initialized with a value.\n- The code does not count multiple occurrences of the word in a single line.\n\nSuggestions for improvement:\n- Ask the user for the word to search for and initialize the word variable with the user's input.\n- Modify the loop to count multiple occurrences of the word in a single line.", "score": [6.0, 8.0]}
+{"review_id": "4J6FjknfHm2y3Kfaer8AuR", "question_id": 62, "answer1_id": "MkbgwTpP5b79NWdPEnvHyq", "answer2_id": "G7QjnFhVnsMPMN9UES8mTV", "reviewer_id": 2, "metadata": {}, "text": "4 6\n\nAssistant 1 provided a good explanation of the longest common subsequence problem but did not provide any code implementation. They could improve by actually implementing the solution in Python.\n\nAssistant 2 provided a code implementation, but it is incorrect. The code does not properly implement dynamic programming, and the break statement in the else block is unnecessary. The function also returns the length of the LCS, not the LCS itself. Here is a corrected version of the code:\n\n```python\ndef longest_common_subsequence(str1, str2):\n m, n = len(str1), len(str2)\n dp = [[0] * (n+1) for _ in range(m+1)]\n\n for i in range(1, m+1):\n for j in range(1, n+1):\n if str1[i-1] == str2[j-1]:\n dp[i][j] = dp[i-1][j-1] + 1\n else:\n dp[i][j] = max(dp[i-1][j], dp[i][j-1])\n\n lcs = \"\"\n i, j = m, n\n while i > 0 and j > 0:\n if str1[i-1] == str2[j-1]:\n lcs = str1[i-1] + lcs\n i -= 1\n j -= 1\n elif dp[i-1][j] > dp[i][j-1]:\n i -= 1\n else:\n j -= 1\n\n return lcs\n```\n\nAssistant 2 could improve by testing their code and ensuring it correctly solves the problem. Additionally, they should provide more comments to explain the logic and functionality of the code.", "score": [4.0, 6.0]}
+{"review_id": "bF4sGnoYBAjsm8MrZXDmUc", "question_id": 63, "answer1_id": "PSb6r567shkRasPkJagvWi", "answer2_id": "RHqehPQpeXkN9d6NHkVGxU", "reviewer_id": 2, "metadata": {}, "text": "6 8\n\nAssistant 1:\nStrengths:\n- The provided regular expression is mostly accurate and covers a wide range of email formats.\n- The explanation of the difference between re.match and re.search is helpful.\n\nWeaknesses:\n- The code is incomplete and contains syntax errors, such as missing an else statement and not importing the re module.\n- The regular expression does not account for uppercase letters, which are valid in email addresses.\n- The answer could be more concise and better structured.\n\nSuggestions for improvement:\n- Complete the code examples and fix syntax errors.\n- Modify the regular expression to include uppercase letters.\n- Provide a more concise and structured answer.\n\nAssistant 2:\nStrengths:\n- The provided regular expression is accurate and covers a wide range of email formats.\n- The code is complete, well-structured, and easy to understand.\n- The answer includes clear explanations and examples.\n\nWeaknesses:\n- The regular expression does not account for uppercase letters, which are valid in email addresses.\n\nSuggestions for improvement:\n- Modify the regular expression to include uppercase letters.", "score": [6.0, 8.0]}
+{"review_id": "UdxG6QjixaQKzNu29DjquR", "question_id": 64, "answer1_id": "QWpBq9gM4iYmsTLQ3NNnBe", "answer2_id": "aqYsaowQtr5rYU6iDbRj5x", "reviewer_id": 2, "metadata": {}, "text": "6 8\nAssistant 1 provided an explanation of the Fibonacci sequence and the problem statement but did not provide any code implementation. The explanation was clear and concise, but the lack of code makes it difficult to evaluate their coding abilities.\n\nAssistant 2 provided a Python implementation of the problem using dynamic programming. The code is accurate and efficient, and it includes comments that explain the logic and functionality. The code structure is easy to understand and adheres to proper coding standards. However, the code does not handle multiple test cases as mentioned in the problem statement. To improve the code, Assistant 2 should add functionality to handle multiple test cases and read input values for 't' and 'n'.", "score": [6.0, 8.0]}
+{"review_id": "8bnxnWLD9Moa3pXgfZL5qC", "question_id": 65, "answer1_id": "U3QxoC4DxXoggnLRbgJh8b", "answer2_id": "fimg24Eg23yjtVd7pYfo8A", "reviewer_id": 2, "metadata": {}, "text": "4 10\n\nAssistant 1's feedback:\nStrengths:\n- Acknowledged the problem statement.\n\nWeaknesses:\n- Did not provide any code implementation.\n- Did not provide any explanation or logic for solving the problem.\n\nSuggestions for improvement:\n- Provide a complete code implementation for the problem.\n- Explain the logic and steps involved in solving the problem.\n- Include comments in the code to make it more readable and understandable.\n\nAssistant 2's feedback:\nStrengths:\n- Provided a complete and correct code implementation for the binary search algorithm.\n- The code is efficient and accurate.\n- Included an example usage of the function.\n- The answer is well-structured and easy to understand.\n\nWeaknesses:\n- None.\n\nSuggestions for improvement:\n- The code is already well-written and efficient. No major improvements needed.", "score": [4.0, 10.0]}
+{"review_id": "BQWyN8WzgdogniLr6xhHbF", "question_id": 66, "answer1_id": "f3TXCreQPJ5Erd6BGt4SFE", "answer2_id": "7bsZbZ9E9jjwgGhnTiTyWS", "reviewer_id": 2, "metadata": {}, "text": "4 7\n\nAssistant 1's submission does not provide any code, but rather a high-level description of how to implement a queue using two stacks. The description is not entirely accurate, as it suggests using a third stack, which is unnecessary. The explanation is also not very clear and could be improved.\n\nAssistant 2's submission provides a working implementation of a queue using two stacks. However, the code has some issues. The `enqueue` method checks if the item is equal to the rear, which is unnecessary. The `dequeue` method does not properly handle the case when `stack2` is empty. The code could be improved by removing the unnecessary checks and handling the case when `stack2` is empty. Here's an updated version of the code:\n\n```python\nclass Queue:\n def __init__(self):\n self.stack1 = []\n self.stack2 = []\n\n def enqueue(self, item):\n self.stack1.append(item)\n\n def dequeue(self):\n if not self.stack2:\n while self.stack1:\n self.stack2.append(self.stack1.pop())\n return self.stack2.pop() if self.stack2 else None\n```\n\nThis version of the code is more concise and handles the case when `stack2` is empty correctly. It also removes the unnecessary checks for the front and rear pointers.", "score": [4.0, 7.0]}
+{"review_id": "BK4HFmdLxYdDgvFGXyyPfT", "question_id": 67, "answer1_id": "SG7YcT8s4sJNBRqidKk7Z7", "answer2_id": "TMYj69PfzLFm6S8ahKbg5J", "reviewer_id": 2, "metadata": {}, "text": "4 6\n\nAssistant 1:\nStrengths:\n- The answer provides a high-level approach to solving the problem.\n\nWeaknesses:\n- The answer does not provide any actual code implementation.\n- The answer suggests using an extra data structure (accumulated array), which is not allowed by the problem statement.\n\nSuggestions for improvement:\n- Provide a complete code implementation that adheres to the problem statement's constraints.\n- Avoid using extra data structures, as required by the problem statement.\n\nAssistant 2:\nStrengths:\n- The answer provides a step-by-step approach to solving the problem.\n- The answer adheres to the problem statement's constraints by not using extra data structures.\n\nWeaknesses:\n- The answer does not provide any actual code implementation.\n- The answer could be more concise and clear in its explanation.\n\nSuggestions for improvement:\n- Provide a complete code implementation that adheres to the problem statement's constraints.\n- Improve the clarity and conciseness of the explanation.", "score": [4.0, 6.0]}
+{"review_id": "65puCiRvUN3gKJ7HZ5USUY", "question_id": 68, "answer1_id": "ENsG3zoeUY7ArFo2kNaQbz", "answer2_id": "NDXxGeVFruYsJ3NZ7zjJ3n", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\nf(x) = 5x^3 - 2x + 3\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 5(8) - 4 + 3\nf(2) = 40 - 4 + 3\nf(2) = 39\n\nNow, I will evaluate the answers provided by Assistant 1 and Assistant 2:\n\nAssistant 1's Answer:\nf(2) = 26\n\nThis answer is incorrect. It seems that Assistant 1 did not show any problem-solving steps, so it's hard to determine where the mistake was made.\n\nAssistant 2's Answer:\nf(2) = 5(2)^3 - 2(2) + 3\nf(2) = 10 - 2 + 3\nf(2) = 10 - 5 + 3\nf(2) = 2\n\nThis answer is also incorrect. Assistant 2 made a mistake in the first step of the calculation, where they should have calculated 5(2)^3 as 5(8) = 40, not 10.\n\nScores:\nAssistant 1: 1/10 (incorrect answer, no problem-solving steps)\nAssistant 2: 4/10 (incorrect answer, but showed problem-solving steps with one mistake)\n\nIn this case, Assistant 2 performed better than Assistant 1, as they provided problem-solving steps, but both assistants provided incorrect answers.\n\nThe correct tuple output is (1, 4).", "score": [1, 4]}
+{"review_id": "A2nW8mSXsuA25Kj75fj7qt", "question_id": 69, "answer1_id": "C7CN5AYJhGC7iDgS7rciDQ", "answer2_id": "g3NBgumbbZBxnCMCPj7kof", "reviewer_id": 3, "metadata": {}, "text": "First, I will solve the problem independently:\n\n3x + 10 = 5(x - 2)\n3x + 10 = 5x - 10\n10 + 10 = 5x - 3x\n20 = 2x\nx = 10\n\nNow, I will examine the problem-solving process of Assistant 1 and Assistant 2 step-by-step:\n\nAssistant 1:\n3x + 10 = 5x - 10x + 20 = 20 + 10x = 30x = 10.\n\nAssistant 1's steps are incorrect and confusing. It seems like they tried to combine terms in an incorrect manner, leading to an incorrect result.\n\nAssistant 2:\n3x + 10 = 5(x - 2)\n3x + 10 - 5(x - 2) = 0\n3x - 5x + 10 - 5(x - 2) = 0\n2x - x + 10 - 5(x - 2) = 0\nx - x + 10 - 5(x - 2) = 0\n3x - 2x + 10 - 5(x - 2) = 0\n-x + 2x + 10 - 5x = 0\n-x = 2x + 10 - 5x\n-x = 7x/7\n\nAssistant 2's steps are also incorrect. They made mistakes in the process of subtracting 5(x - 2) from both sides of the equation, and their subsequent steps are confusing and incorrect.\n\nScores:\nAssistant 1: 1/10\nAssistant 2: 1/10\n\nBoth assistants performed poorly in solving the problem, and neither provided a correct solution.", "score": [1, 1]}
+{"review_id": "f689S6bcrnF9pydBjBodvb", "question_id": 70, "answer1_id": "QP9CmZKwhu2BrUmzhy8tH4", "answer2_id": "jwp2XMxq8PTKTJiZCATHnG", "reviewer_id": 3, "metadata": {}, "text": "First, let's solve the problem independently. To find the length of a line segment with endpoints (x1, y1) and (x2, y2), we can use the distance formula:\n\nLength = sqrt((x2 - x1)^2 + (y2 - y1)^2)\n\nIn this case, the endpoints are (2, -2) and (10, 4). Plugging these values into the formula, we get:\n\nLength = sqrt((10 - 2)^2 + (4 - (-2))^2)\nLength = sqrt(8^2 + 6^2)\nLength = sqrt(64 + 36)\nLength = sqrt(100)\nLength = 10 units\n\nNow, let's evaluate the answers provided by Assistant 1 and Assistant 2.\n\nAssistant 1's Answer:\nThe length of the line segment is 10 units.\nThis answer is correct.\n\nAssistant 2's Answer:\nThe length of a line segment can be found by subtracting the x-coordinate of one endpoint from the x-coordinate of the other endpoint. In this case, the x-coordinate of the first endpoint is 2 and the x-coordinate of the second endpoint is 10, so the length of the line segment is:\n\nLength = 10 - 2 = 8 units.\nThis answer is incorrect. Assistant 2 only considered the difference in x-coordinates, which is not the correct method for finding the length of a line segment.\n\nScores:\nAssistant 1: 10 (Correct answer and method)\nAssistant 2: 2 (Incorrect answer and method)\n\nAssistant 1 performed better in this case.", "score": [10, 2]}
+{"review_id": "CZJoGw7Jj5AaLPAeDnbhYu", "question_id": 71, "answer1_id": "2KtpJeRqEfbPLwdUxAXWrj", "answer2_id": "B6CrSifSJDEHUYiBaiHELR", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a list of tips for writing a formal email, which is helpful but not as precise as the user's request. Assistant 2, on the other hand, provided a complete and detailed example of a formal email that directly addresses the user's question. The email is well-structured, clear, and includes all the necessary information for proposing a joint venture. Assistant 2's response is more helpful, relevant, and accurate, which is why it receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "GLw29X2fp49dKnMnCKBtVK", "question_id": 72, "answer1_id": "gsL3R2grTXUiHdeDSk8f9Q", "answer2_id": "932DTWDKnBbQDQYTW6be87", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a helpful starting point for a resignation letter, but Assistant 2 went above and beyond by providing a more detailed and complete template that the user can easily customize. Assistant 2's answer was more comprehensive and covered all the necessary elements of a resignation letter, making it more useful for the user.", "score": [8.0, 10.0]}
+{"review_id": "Pd2yKvuRWN42cm9ioecPGE", "question_id": 73, "answer1_id": "HprPMoUk9JY5TtToRbuMwy", "answer2_id": "jVEfncaU9LitqggtYBGuik", "reviewer_id": 1, "metadata": {}, "text": "7 10\nAssistant 1 provided a brief and general outline of a recommendation letter, which is helpful but lacks the level of detail and specificity needed for a formal letter. Assistant 2, on the other hand, provided a comprehensive and well-structured example of a formal letter of recommendation, including all necessary components and addressing the specific context of a computer science graduate program. The response from Assistant 2 is more helpful, relevant, and detailed, which is why it receives a higher score.", "score": [7.0, 10.0]}
+{"review_id": "TmdoUTcBj6PJT5JwnLBN9u", "question_id": 74, "answer1_id": "YU7gDhmo4LDVMTEZZMRdBC", "answer2_id": "NQSftH48RoRxTKLCqNbPtt", "reviewer_id": 1, "metadata": {}, "text": "0 9\nAssistant 1 provided no answer at all, so it receives a score of 0. Assistant 2's answer was helpful, relevant, accurate, and detailed. It included a subject line, a clear introduction, a list of key features, and a call to action for scheduling a demo. The tone was professional and compelling, making it an effective product launch announcement email. Therefore, Assistant 2 receives a score of 9.", "score": [0.0, 9.0]}
+{"review_id": "BNRZ9NUnwPnaPV6mv5CcY5", "question_id": 75, "answer1_id": "2gNqU2XpsL6yqFn6DMRgHq", "answer2_id": "YeYDnz2kVYAfsz7vCfgLNS", "reviewer_id": 1, "metadata": {}, "text": "4 10\nAssistant 1's response was not helpful as it only asked for the customer's name and did not provide any content for the apology email. On the other hand, Assistant 2's response was relevant, accurate, and detailed, providing a complete draft of an apology email that addressed the customer's issue and offered reassurance. The response was well-structured and professional, making it a perfect 10.", "score": [4.0, 10.0]}
+{"review_id": "4hhD8CXrVFqtPn38QNStKF", "question_id": 76, "answer1_id": "a4L6JNPuomkPLdWxJRQpod", "answer2_id": "o2LgSDUwp4b4y4iCteh3RR", "reviewer_id": 1, "metadata": {}, "text": "8 10\nAssistant 1 provided a brief and accurate overview of the history and cultural significance of jazz, but the response lacked the format of a script for a YouTube video. Assistant 2, on the other hand, delivered a well-structured script that not only covered the history and cultural significance of jazz but also included visual and audio cues, making it more suitable for a YouTube video. Assistant 2's answer also provided a more engaging narrative and a better flow, which is why it receives a higher score.", "score": [8.0, 10.0]}
+{"review_id": "dP58FowQJAryiBEQ2Ltrrp", "question_id": 77, "answer1_id": "eN9V2Wf2BNzWoJosqdFw6K", "answer2_id": "CBvZQ8RJG6AqhUBSoUDbqU", "reviewer_id": 1, "metadata": {}, "text": "2 9\nAssistant 1's response did not provide any content or answer to the question, making it unhelpful and irrelevant. On the other hand, Assistant 2's response was engaging, relevant, and detailed, covering cultural experiences, must-see attractions, and adventurous activities in Hawaii. The response was well-structured and provided a comprehensive overview of a trip to Hawaii, making it deserving of a high score.", "score": [2.0, 9.0]}
+{"review_id": "XhHtn66JczMczUHXSD3pdm", "question_id": 78, "answer1_id": "iCzkbWTatKfuC6QVv59XgA", "answer2_id": "VfeFmV9H6SEuotYqEf5ri5", "reviewer_id": 1, "metadata": {}, "text": "7 9\nAssistant 1's answer provided a detailed plot summary, character descriptions, and mentioned the special effects, but it incorrectly stated that the movie was based on \"Ready Player One\" by Ernest Cline and directed by Steven Spielberg, which is not relevant to the given question. Assistant 2's answer, on the other hand, was more captivating and relevant, discussing the plot, characters, and special effects in a concise and engaging manner. Assistant 2's answer also provided the names of the actors, making it more informative.", "score": [7.0, 9.0]}
+{"review_id": "fagwFYoJVmZ4MCfhcFZH24", "question_id": 79, "answer1_id": "GQcFyY8ayFT48DAP5NKNZf", "answer2_id": "ZLYhNQxJXycBZL56RbdGh3", "reviewer_id": 1, "metadata": {}, "text": "4 9\nAssistant 1's response provided a long list of points related to the topic but did not structure it as a podcast script, which was the main requirement of the question. Assistant 2, on the other hand, provided a well-structured podcast script with a clear introduction, interviews with artists and industry insiders, and a conclusion. The response was relevant, detailed, and accurate, making it much more helpful and aligned with the user's request.", "score": [4.0, 9.0]}
+{"review_id": "URnqJT6CEdCs2Pb5k6gEDd", "question_id": 80, "answer1_id": "A6pauLMckn34otnfpeQRFi", "answer2_id": "kZw2ii8HQtrQp4d2bK5cHy", "reviewer_id": 1, "metadata": {}, "text": "8 9\nBoth Assistant 1 and Assistant 2 provided relevant and helpful answers to the user's question. Assistant 1's answer was concise and covered the main aspects of a symphony concert review, such as the orchestra's performance, the pieces played, and the audience's reaction. However, Assistant 2's answer was more detailed and provided a more immersive description of the concert experience, including the venue, conductor, and the energy in the room. This made Assistant 2's answer slightly more engaging and informative, resulting in a higher score.", "score": [8.0, 9.0]}
diff --git a/model/fastchat/eval/table/reviewer.jsonl b/model/fastchat/eval/table/reviewer.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..2f8941a641bcee0307fb2bb40c0365150f9d84fe
--- /dev/null
+++ b/model/fastchat/eval/table/reviewer.jsonl
@@ -0,0 +1,3 @@
+{"reviewer_id": "gpt-4-0328-default", "prompt_id": 1, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for general questions", "category": "general"}
+{"reviewer_id": "gpt-4-0328-coding", "prompt_id": 2, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for coding questions", "category": "coding"}
+{"reviewer_id": "gpt-4-0328-math", "prompt_id": 3, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for math questions", "category": "math"}
diff --git a/model/fastchat/eval/webpage/figures/alpaca.png b/model/fastchat/eval/webpage/figures/alpaca.png
new file mode 100644
index 0000000000000000000000000000000000000000..497a702ab5efb88b8f67333eae81645eecea78cd
Binary files /dev/null and b/model/fastchat/eval/webpage/figures/alpaca.png differ
diff --git a/model/fastchat/eval/webpage/figures/bard.jpg b/model/fastchat/eval/webpage/figures/bard.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5b32cb501799175e3829f92b014795ad1cbee79d
Binary files /dev/null and b/model/fastchat/eval/webpage/figures/bard.jpg differ
diff --git a/model/fastchat/eval/webpage/figures/chatgpt.svg b/model/fastchat/eval/webpage/figures/chatgpt.svg
new file mode 100644
index 0000000000000000000000000000000000000000..8147382a3152de03c24b4cd91f9870ced1a95d54
--- /dev/null
+++ b/model/fastchat/eval/webpage/figures/chatgpt.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/model/fastchat/eval/webpage/figures/llama.jpg b/model/fastchat/eval/webpage/figures/llama.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7217e5dc1bb683453204a20890f01f5806ce12cf
Binary files /dev/null and b/model/fastchat/eval/webpage/figures/llama.jpg differ
diff --git a/model/fastchat/eval/webpage/figures/swords_FILL0_wght300_GRAD0_opsz48.svg b/model/fastchat/eval/webpage/figures/swords_FILL0_wght300_GRAD0_opsz48.svg
new file mode 100644
index 0000000000000000000000000000000000000000..3bee468d34515fdcbef1a8b8803c9fc4f7dc0b34
--- /dev/null
+++ b/model/fastchat/eval/webpage/figures/swords_FILL0_wght300_GRAD0_opsz48.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/model/fastchat/eval/webpage/figures/vicuna.jpeg b/model/fastchat/eval/webpage/figures/vicuna.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..e7883dc886b96d078883e01aefd16792133e204a
Binary files /dev/null and b/model/fastchat/eval/webpage/figures/vicuna.jpeg differ
diff --git a/model/fastchat/eval/webpage/index.html b/model/fastchat/eval/webpage/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..c2e3cf020ba7d8e064f2cd801788a5d2d50b97da
--- /dev/null
+++ b/model/fastchat/eval/webpage/index.html
@@ -0,0 +1,162 @@
+
+
+
+
+
+ Who's GPT-4's favorite? Battles between State-of-the-Art Chatbots
+
+
+
+
+
+
+
+ 🏔️ Vicuna Evaluation Examples
+
+
+
+
+
+
+
+
Who's GPT-4's favorite? Battles between State-of-the-Art Chatbots
+
+
+
+
+
+
+
+ */10
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ */10
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
This website is co-authored with GPT-4 .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/model/fastchat/eval/webpage/script.js b/model/fastchat/eval/webpage/script.js
new file mode 100644
index 0000000000000000000000000000000000000000..4b71e3d5618a262e4746f58e5d10947b73370dca
--- /dev/null
+++ b/model/fastchat/eval/webpage/script.js
@@ -0,0 +1,245 @@
+// Description: Script for the evaluation webpage.
+
+let currentQuestionIndex = 1;
+
+// Store the model name mapping for later use.
+modelNameMapping = {
+ "gpt35": "ChatGPT-3.5",
+ "gpt4": "GPT-4",
+ "alpaca": "Alpaca-13b",
+ "vicuna": "Vicuna-13b",
+ "llama": "LLaMA-13b",
+ "bard": "Bard",
+};
+
+modelFigureMapping = {
+ "vicuna": "figures/vicuna.jpeg",
+ // Image from: https://commons.wikimedia.org/wiki/File:ChatGPT_logo.svg
+ "gpt35": "figures/chatgpt.svg",
+ // Image from: https://www.reddit.com/r/logodesign/comments/1128aat/google_ai_bard_logo_design/
+ "bard": "figures/bard.jpg",
+ // Image from: https://crfm.stanford.edu/2023/03/13/alpaca.html
+ "alpaca": "figures/alpaca.png",
+ // Image adapted from https://commons.wikimedia.org/wiki/File:Llama_on_Machu_Picchu.jpg
+ "llama": "figures/llama.jpg",
+}
+
+// Store the question data in a mapping for later use.
+questionMapping = {};
+// Store the question ids in a mapping for later use.
+categoryMapping = {};
+// Store the number of questions for later use.
+questionsCount = 0;
+
+
+function text2Markdown(text) {
+ // Normalize the text for markdown rendering.
+ text = text.trim().replaceAll('\n\n', '\n').replaceAll('\n', '\n\n');
+ return marked.parse(text);
+}
+
+function capitalizeFirstChar(str) {
+ if (!str || str.length === 0) {
+ return str;
+ }
+ return str.charAt(0).toUpperCase() + str.slice(1);
+}
+
+function updateQuestionSelect(question_id) {
+ const select = document.getElementById('question-select');
+ // Clear the question select.
+ select.innerHTML = '';
+ // Populate the question select.
+ category = questionMapping[question_id].category;
+ categoryMapping[category].forEach(question_id => {
+ const question = questionMapping[question_id];
+ const option = document.createElement('option');
+ option.value = question_id;
+ option.textContent = 'Q' + question_id.toString() + ': ' + question.question;
+ select.appendChild(option);
+ });
+ select.value = question_id;
+}
+
+function updateModelSelect() {
+ const select = document.getElementById('model-select');
+ img_path = modelFigureMapping[select.value];
+ document.getElementById('other-model-figure').src = img_path;
+}
+
+function populateModels(models) {
+ const select = document.getElementById('model-select');
+ models.forEach(model => {
+ const option = document.createElement('option');
+ option.value = model;
+ option.textContent = modelNameMapping[model];
+ select.appendChild(option);
+ });
+ updateModelSelect();
+}
+
+function populateQuestions(questions) {
+ const category_select = document.getElementById('category-select');
+
+ questionsCount = questions.length;
+ questions.forEach(question => {
+ const option = document.createElement('option');
+ // Store the question data in a mapping for later use.
+ questionMapping[question.id] = {
+ category: question.category,
+ question: question.question,
+ answers: question.answers,
+ evaluations: question.evaluations,
+ scores: question.scores,
+ };
+ // Store the question id in the category mapping.
+ if (question.category in categoryMapping) {
+ categoryMapping[question.category].push(question.id);
+ } else {
+ categoryMapping[question.category] = [question.id];
+ const category_option = document.createElement('option');
+ category_option.value = question.category;
+ category_option.textContent = capitalizeFirstChar(question.category);
+ category_select.appendChild(category_option);
+ }
+ });
+ // Set the default category.
+ updateQuestionSelect(currentQuestionIndex);
+}
+
+function displayQuestion(index) {
+ const question = questionMapping[index].question;
+ document.getElementById('selected-question').innerHTML = text2Markdown('**Question:** ' + question);
+ displayAnswers(index);
+}
+
+function displayAnswers(index) {
+ const question = questionMapping[index];
+ const otherModel = document.getElementById('model-select').value;
+ // render the answers with markdown
+ document.getElementById('other-model-answer').innerHTML = text2Markdown(question.answers[otherModel]);
+ document.getElementById('our-model-answer').innerHTML = text2Markdown(question.answers.vicuna);
+
+ // Display evaluation
+ score = question.scores[otherModel];
+ score_text = modelNameMapping[otherModel] + " " + score[0] + "/10, Vicuna-13b " + score[1] + "/10";
+ document.getElementById('evaluation-header').textContent = "GPT-4 Evaluation" + " (Score: " + score_text + ")";
+ document.getElementById('evaluation-result').innerHTML = text2Markdown(question.evaluations[otherModel]);
+
+ // Update model names
+ let assistant1_title = "Assistant #1"; // (" + modelNameMapping[otherModel] + ")";
+ let assistant2_title = "Assistant #2 (Vicuna-13b, our model)";
+ // Update scores/labels.
+ let assistant1_score_label = score[0].toString() + '/10';
+ let assistant2_score_label = score[1].toString() + '/10';
+
+ const colorRed ='#fa9'; // '#eb978d';
+ // const colorGreen = '#c9f2c9';
+ const colorBlue = '#8ef'; // '#71dbf9';
+ const colorYellow = '#fe7'; // '#fada57';
+ let otherModelHeaderColor = '';
+ let ourModelHeaderColor = '';
+ // Update the winner.
+ if (score[0] == score[1]) {
+ assistant1_title = '🏆 ' + assistant1_title;
+ assistant1_score_label = '🏆 ' + assistant1_score_label;
+ assistant2_title = '🏆 ' + assistant2_title;
+ assistant2_score_label = '🏆 ' + assistant2_score_label;
+ otherModelHeaderColor = colorYellow;
+ ourModelHeaderColor = colorYellow;
+ } else if (score[0] > score[1]) {
+ assistant1_title = '🏆 ' + assistant1_title;
+ assistant1_score_label = '🏆 ' + assistant1_score_label;
+ otherModelHeaderColor = colorBlue;
+ ourModelHeaderColor = colorRed;
+ } else if (score[0] < score[1]) {
+ assistant2_title = '🏆 ' + assistant2_title;
+ assistant2_score_label = '🏆 ' + assistant2_score_label;
+ otherModelHeaderColor = colorRed;
+ ourModelHeaderColor = colorBlue;
+ }
+
+ document.getElementById('other-model-header-bg').style.backgroundColor = otherModelHeaderColor;
+ document.getElementById('our-model-header').style.backgroundColor = ourModelHeaderColor;
+
+ document.getElementById('other-model-header').textContent = assistant1_title;
+ document.getElementById('our-model-header').textContent = assistant2_title;
+
+ document.getElementById('other-score-label').textContent = assistant1_score_label;
+ document.getElementById('our-score-label').textContent = assistant2_score_label;
+
+ // Update expand buttons visibility for both cards after displaying answers
+ // Reset the expanded state and update expand buttons visibility for both cards after displaying answers
+ document.querySelectorAll('.expandable-card').forEach(card => {
+ card.classList.remove('expanded');
+ updateExpandButtonVisibility(card);
+ const expandBtn = card.querySelector('.expand-btn');
+ expandBtn.innerHTML = 'keyboard_arrow_down Show more'; // .textContent = 'Show more';
+ });
+}
+
+document.getElementById('question-select').addEventListener('change', e => {
+ currentQuestionIndex = parseInt(e.target.value);
+ displayQuestion(currentQuestionIndex);
+});
+
+document.getElementById('category-select').addEventListener('change', e => {
+ let currentCategory = e.target.value;
+ const questionIds = categoryMapping[currentCategory];
+ currentQuestionIndex = questionIds[0];
+ updateQuestionSelect(currentQuestionIndex);
+ displayQuestion(currentQuestionIndex);
+});
+
+// Update expand buttons whenever the model is changed
+document.getElementById('model-select').addEventListener('change', () => {
+ displayAnswers(currentQuestionIndex);
+ document.querySelectorAll('.expandable-card').forEach(card => {
+ updateExpandButtonVisibility(card);
+ });
+ updateModelSelect();
+});
+
+function switchQuestionAndCategory() {
+ document.getElementById('question-select').value = currentQuestionIndex;
+ old_category = document.getElementById('category-select').value;
+ new_category = questionMapping[currentQuestionIndex].category;
+ if (old_category != new_category) {
+ document.getElementById('category-select').value = new_category;
+ updateQuestionSelect(currentQuestionIndex);
+ }
+ displayQuestion(currentQuestionIndex);
+}
+
+document.getElementById('prev-question').addEventListener('click', () => {
+ // Question index starts from 1.
+ currentQuestionIndex = Math.max(1, currentQuestionIndex - 1);
+ switchQuestionAndCategory();
+});
+
+document.getElementById('next-question').addEventListener('click', () => {
+ // Question index starts from 1.
+ currentQuestionIndex = Math.min(questionsCount, currentQuestionIndex + 1);
+ switchQuestionAndCategory();
+});
+
+function updateExpandButtonVisibility(card) {
+ const cardTextContainer = card.querySelector('.card-text-container');
+ const expandBtn = card.querySelector('.expand-btn');
+ if (cardTextContainer.scrollHeight > cardTextContainer.offsetHeight) {
+ expandBtn.style.display = 'flex';
+ } else {
+ expandBtn.style.display = 'none';
+ card.classList.add('expanded');
+ }
+}
+
+document.querySelectorAll('.expand-btn').forEach(btn => {
+ btn.addEventListener('click', e => {
+ const card = e.target.closest('.expandable-card');
+ card.classList.toggle('expanded');
+ const more = 'keyboard_arrow_down Show more';
+ const less = 'keyboard_arrow_up Show less';
+ e.target.innerHTML = card.classList.contains('expanded') ? less : more;
+ });
+});
diff --git a/model/fastchat/eval/webpage/styles.css b/model/fastchat/eval/webpage/styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..7b6d6fc69b336c0a5d103be9fb13a0e0897c76a3
--- /dev/null
+++ b/model/fastchat/eval/webpage/styles.css
@@ -0,0 +1,105 @@
+body {
+ font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
+ background-color: #f8f9fa;
+}
+
+.navbar-dark .navbar-nav .nav-link {
+ color: #f1cf68;
+ font-size: 1.1rem;
+ padding: 0.5rem 0.6rem;
+}
+
+.card-header {
+ font-weight: bold;
+}
+
+.card {
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+ transition: 0.3s;
+}
+
+.card:hover {
+ box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
+}
+
+button {
+ transition: background-color 0.3s;
+}
+
+button:hover {
+ background-color: #007bff;
+}
+
+@media (max-width: 767px) {
+ .form-row .form-group {
+ margin-bottom: 10px;
+ }
+}
+
+/* Extra styles */
+
+.expandable-card .card-text-container {
+ max-height: 200px;
+ overflow-y: hidden;
+ position: relative;
+}
+
+.expandable-card.expanded .card-text-container {
+ max-height: none;
+}
+
+.expand-btn {
+ position: relative;
+ display: none;
+ background-color: rgba(255, 255, 255, 0.8);
+ color: #510c75;
+ border-color: transparent;
+}
+
+.expand-btn:hover {
+ background-color: rgba(200, 200, 200, 0.8);
+ text-decoration: none;
+ border-color: transparent;
+ color: #510c75;
+}
+
+.expand-btn:focus {
+ outline: none;
+ text-decoration: none;
+}
+
+.expandable-card:not(.expanded) .card-text-container:after {
+ content: "";
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ width: 100%;
+ height: 90px;
+ background: linear-gradient(rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 1));
+}
+
+.expandable-card:not(.expanded) .expand-btn {
+ margin-top: -40px;
+}
+
+.card-body {
+ padding-bottom: 5px;
+}
+
+.vertical-flex-layout {
+ justify-content: center;
+ align-items: center;
+ height: 100%;
+ display: flex;
+ flex-direction: column;
+ gap: 5px;
+}
+
+.figure-img {
+ max-width: 100%;
+ height: auto;
+}
+
+.adjustable-font-size {
+ font-size: calc(0.5rem + 2vw);
+}
diff --git a/model/fastchat/model/__init__.py b/model/fastchat/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/model/fastchat/model/apply_delta.py b/model/fastchat/model/apply_delta.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e8ff15d4d188888a27bd673c21f466034993a4c
--- /dev/null
+++ b/model/fastchat/model/apply_delta.py
@@ -0,0 +1,165 @@
+"""
+Apply the delta weights on top of a base model.
+
+Usage:
+python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta-v1.1
+"""
+import argparse
+import gc
+import glob
+import json
+import os
+import shutil
+import tempfile
+
+from huggingface_hub import snapshot_download
+import torch
+from torch import nn
+from tqdm import tqdm
+from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
+
+
+GB = 1 << 30
+
+
+def split_files(model_path, tmp_path, split_size):
+ if not os.path.exists(model_path):
+ model_path = snapshot_download(repo_id=model_path)
+ if not os.path.exists(tmp_path):
+ os.makedirs(tmp_path)
+
+ file_pattern = os.path.join(model_path, "pytorch_model-*.bin")
+ files = glob.glob(file_pattern)
+
+ part = 0
+ try:
+ for file_path in tqdm(files):
+ state_dict = torch.load(file_path)
+ new_state_dict = {}
+
+ current_size = 0
+ for name, param in state_dict.items():
+ param_size = param.numel() * param.element_size()
+
+ if current_size + param_size > split_size:
+ new_file_name = f"pytorch_model-{part}.bin"
+ new_file_path = os.path.join(tmp_path, new_file_name)
+ torch.save(new_state_dict, new_file_path)
+ current_size = 0
+ new_state_dict = None
+ gc.collect()
+ new_state_dict = {}
+ part += 1
+
+ new_state_dict[name] = param
+ current_size += param_size
+
+ new_file_name = f"pytorch_model-{part}.bin"
+ new_file_path = os.path.join(tmp_path, new_file_name)
+ torch.save(new_state_dict, new_file_path)
+ new_state_dict = None
+ gc.collect()
+ new_state_dict = {}
+ part += 1
+ except Exception as e:
+ print(f"An error occurred during split_files: {e}")
+ shutil.rmtree(tmp_path)
+ raise
+
+
+def apply_delta_low_cpu_mem(base_model_path, target_model_path, delta_path):
+ base_tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=False)
+ base_config = AutoConfig.from_pretrained(base_model_path)
+
+ if os.path.exists(target_model_path):
+ shutil.rmtree(target_model_path)
+ os.makedirs(target_model_path)
+
+ split_size = 4 * GB
+
+ with tempfile.TemporaryDirectory() as tmp_base_path, tempfile.TemporaryDirectory() as tmp_delta_path:
+ print(f"Split files for the base model to {tmp_base_path}")
+ split_files(base_model_path, tmp_base_path, split_size)
+ print(f"Split files for the delta model to {tmp_delta_path}")
+ split_files(delta_path, tmp_delta_path, split_size)
+
+ base_pattern = os.path.join(tmp_base_path, "pytorch_model-*.bin")
+ base_files = glob.glob(base_pattern)
+ delta_pattern = os.path.join(tmp_delta_path, "pytorch_model-*.bin")
+ delta_files = glob.glob(delta_pattern)
+ delta_state_dict = torch.load(delta_files[0])
+
+ print("Applying the delta")
+ weight_map = {}
+ total_size = 0
+
+ for i, base_file in tqdm(enumerate(base_files)):
+ state_dict = torch.load(base_file)
+ file_name = f"pytorch_model-{i}.bin"
+ for name, param in state_dict.items():
+ if name not in delta_state_dict:
+ for delta_file in delta_files:
+ delta_state_dict = torch.load(delta_file)
+ gc.collect()
+ if name in delta_state_dict:
+ break
+
+ state_dict[name] += delta_state_dict[name]
+ weight_map[name] = file_name
+ total_size += param.numel() * param.element_size()
+ gc.collect()
+ torch.save(state_dict, os.path.join(target_model_path, file_name))
+
+ with open(
+ os.path.join(target_model_path, "pytorch_model.bin.index.json"), "w"
+ ) as f:
+ json.dump(
+ {"weight_map": weight_map, "metadata": {"total_size": total_size}}, f
+ )
+
+ print(f"Saving the target model to {target_model_path}")
+ base_tokenizer.save_pretrained(target_model_path)
+ base_config.save_pretrained(target_model_path)
+
+
+def apply_delta(base_model_path, target_model_path, delta_path):
+ print(f"Loading the base model from {base_model_path}")
+ base = AutoModelForCausalLM.from_pretrained(
+ base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+ base_tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=False)
+
+ print(f"Loading the delta from {delta_path}")
+ delta = AutoModelForCausalLM.from_pretrained(
+ delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+
+ print("Applying the delta")
+ for name, param in tqdm(base.state_dict().items(), desc="Applying delta"):
+ assert name in delta.state_dict()
+ param.data += delta.state_dict()[name]
+
+ print(f"Saving the target model to {target_model_path}")
+ base.save_pretrained(target_model_path)
+ base_tokenizer.save_pretrained(target_model_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--base-model-path", type=str, required=True)
+ parser.add_argument("--target-model-path", type=str, required=True)
+ parser.add_argument("--delta-path", type=str, required=True)
+ parser.add_argument(
+ "--low-cpu-mem",
+ action="store_true",
+ help="Lower the cpu memory usage. This will split large files and use "
+ "disk as swap to reduce the memory usage below 10GB.",
+ )
+ args = parser.parse_args()
+
+ if args.low_cpu_mem:
+ apply_delta_low_cpu_mem(
+ args.base_model_path, args.target_model_path, args.delta_path
+ )
+ else:
+ apply_delta(args.base_model_path, args.target_model_path, args.delta_path)
diff --git a/model/fastchat/model/apply_lora.py b/model/fastchat/model/apply_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..870e64a3b4bfb6e44df70bf922bb8e3f6d3c9d42
--- /dev/null
+++ b/model/fastchat/model/apply_lora.py
@@ -0,0 +1,48 @@
+"""
+Apply the LoRA weights on top of a base model.
+
+Usage:
+python3 -m fastchat.model.apply_lora --base ~/model_weights/llama-7b --target ~/model_weights/baize-7b --lora project-baize/baize-lora-7B
+
+Dependency:
+pip3 install git+https://github.com/huggingface/peft.git@2822398fbe896f25d4dac5e468624dc5fd65a51b
+"""
+import argparse
+
+import torch
+from peft import PeftModel
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+
+def apply_lora(base_model_path, target_model_path, lora_path):
+ print(f"Loading the base model from {base_model_path}")
+ base = AutoModelForCausalLM.from_pretrained(
+ base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+ base_tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=False)
+
+ print(f"Loading the LoRA adapter from {lora_path}")
+
+ lora_model = PeftModel.from_pretrained(
+ base,
+ lora_path,
+ torch_dtype=torch.float16,
+ )
+
+ print("Applying the LoRA")
+ model = lora_model.merge_and_unload()
+
+ print(f"Saving the target model to {target_model_path}")
+ model.save_pretrained(target_model_path)
+ base_tokenizer.save_pretrained(target_model_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--base-model-path", type=str, required=True)
+ parser.add_argument("--target-model-path", type=str, required=True)
+ parser.add_argument("--lora-path", type=str, required=True)
+
+ args = parser.parse_args()
+
+ apply_lora(args.base_model_path, args.target_model_path, args.lora_path)
diff --git a/model/fastchat/model/convert_fp16.py b/model/fastchat/model/convert_fp16.py
new file mode 100644
index 0000000000000000000000000000000000000000..efc40aa83bf3a85129a668387df86a41d925f13d
--- /dev/null
+++ b/model/fastchat/model/convert_fp16.py
@@ -0,0 +1,26 @@
+"""
+Usage:
+python3 -m fastchat.model.convert_fp16 --in in-folder --out out-folder
+"""
+import argparse
+
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+
+def convert_fp16(in_checkpoint, out_checkpoint):
+ tokenizer = AutoTokenizer.from_pretrained(in_checkpoint, use_fast=False)
+ model = AutoModelForCausalLM.from_pretrained(
+ in_checkpoint, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+ model.save_pretrained(out_checkpoint)
+ tokenizer.save_pretrained(out_checkpoint)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-checkpoint", type=str, help="Path to the model")
+ parser.add_argument("--out-checkpoint", type=str, help="Path to the output model")
+ args = parser.parse_args()
+
+ convert_fp16(args.in_checkpoint, args.out_checkpoint)
diff --git a/model/fastchat/model/make_delta.py b/model/fastchat/model/make_delta.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebaa2db62e50f7d91ee0d1f9379e704c932b9ec2
--- /dev/null
+++ b/model/fastchat/model/make_delta.py
@@ -0,0 +1,46 @@
+"""
+Make the delta weights by subtracting base weights.
+
+Usage:
+python3 -m fastchat.model.make_delta --base ~/model_weights/llama-13b --target ~/model_weights/vicuna-13b --delta ~/model_weights/vicuna-13b-delta --hub-repo-id lmsys/vicuna-13b-delta-v1.1
+"""
+import argparse
+
+import torch
+from tqdm import tqdm
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+
+def make_delta(base_model_path, target_model_path, delta_path):
+ print(f"Loading the base model from {base_model_path}")
+ base = AutoModelForCausalLM.from_pretrained(
+ base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+
+ print(f"Loading the target model from {target_model_path}")
+ target = AutoModelForCausalLM.from_pretrained(
+ target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+
+ print("Calculating the delta")
+ for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
+ assert name in base.state_dict()
+ param.data -= base.state_dict()[name]
+
+ print(f"Saving the delta to {delta_path}")
+ if args.hub_repo_id:
+ kwargs = {"push_to_hub": True, "repo_id": args.hub_repo_id}
+ else:
+ kwargs = {}
+ target.save_pretrained(delta_path, **kwargs)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--base-model-path", type=str, required=True)
+ parser.add_argument("--target-model-path", type=str, required=True)
+ parser.add_argument("--delta-path", type=str, required=True)
+ parser.add_argument("--hub-repo-id", type=str)
+ args = parser.parse_args()
+
+ make_delta(args.base_model_path, args.target_model_path, args.delta_path)
diff --git a/model/fastchat/protocol/chat_completion.py b/model/fastchat/protocol/chat_completion.py
new file mode 100644
index 0000000000000000000000000000000000000000..6acf682dc805f5add5aa8eee52200c4d6e92b585
--- /dev/null
+++ b/model/fastchat/protocol/chat_completion.py
@@ -0,0 +1,35 @@
+from typing import Optional, List, Dict, Any
+
+import time
+
+import shortuuid
+from pydantic import BaseModel, Field
+
+
+class ChatCompletionRequest(BaseModel):
+ # TODO: support streaming, stop with a list of text etc.
+ model: str
+ messages: List[Dict[str, str]]
+ temperature: Optional[float] = 0.7
+ n: int = 1
+ max_tokens: Optional[int] = None
+ stop: Optional[str] = None
+
+
+class ChatMessage(BaseModel):
+ role: str
+ content: str
+
+
+class ChatCompletionResponseChoice(BaseModel):
+ index: int
+ message: ChatMessage
+ finish_reason: str
+
+
+class ChatCompletionResponse(BaseModel):
+ id: str = Field(default_factory=shortuuid.random)
+ object: str = "chat.completion"
+ created: int = Field(default_factory=lambda: int(time.time()))
+ choices: List[ChatCompletionResponseChoice]
+ usage: Optional[Dict[str, int]] = None
diff --git a/model/fastchat/serve/__init__.py b/model/fastchat/serve/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/model/fastchat/serve/api.py b/model/fastchat/serve/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5aeb579e5ad76e18c54b2663f2abc1f42d58160
--- /dev/null
+++ b/model/fastchat/serve/api.py
@@ -0,0 +1,206 @@
+"""This module provides a ChatGPT-compatible Restful API for chat completion.
+
+Usage:
+
+python3 -m fastchat.serve.api
+
+Reference: https://platform.openai.com/docs/api-reference/chat/create
+"""
+import asyncio
+from typing import Union, Dict, List, Any
+
+import argparse
+import json
+import logging
+
+import fastapi
+from fastapi.middleware.cors import CORSMiddleware
+import httpx
+import uvicorn
+from pydantic import BaseSettings
+
+from fastchat.protocol.chat_completion import (
+ ChatCompletionRequest,
+ ChatCompletionResponse,
+ ChatMessage,
+ ChatCompletionResponseChoice,
+)
+from fastchat.conversation import get_default_conv_template, SeparatorStyle
+from fastchat.serve.inference import compute_skip_echo_len
+
+logger = logging.getLogger(__name__)
+
+
+class AppSettings(BaseSettings):
+ # The address of the model controller.
+ FASTCHAT_CONTROLLER_URL: str = "http://localhost:21001"
+
+
+app_settings = AppSettings()
+app = fastapi.FastAPI()
+headers = {"User-Agent": "FastChat API Server"}
+
+
+@app.get("/v1/models")
+async def show_available_models():
+ controller_url = app_settings.FASTCHAT_CONTROLLER_URL
+ async with httpx.AsyncClient() as client:
+ ret = await client.post(controller_url + "/refresh_all_workers")
+ ret = await client.post(controller_url + "/list_models")
+ models = ret.json()["models"]
+ models.sort()
+ return {"data": [{"id": m} for m in models], "object": "list"}
+
+
+@app.post("/v1/chat/completions")
+async def create_chat_completion(request: ChatCompletionRequest):
+ """Creates a completion for the chat message"""
+ payload, skip_echo_len = generate_payload(
+ request.model,
+ request.messages,
+ temperature=request.temperature,
+ max_tokens=request.max_tokens,
+ stop=request.stop,
+ )
+
+ choices = []
+ # TODO: batch the requests. maybe not necessary if using CacheFlow worker
+ chat_completions = []
+ for i in range(request.n):
+ content = asyncio.create_task(chat_completion(request.model, payload, skip_echo_len))
+ chat_completions.append(content)
+
+ for i, content_task in enumerate(chat_completions):
+ content = await content_task
+ choices.append(
+ ChatCompletionResponseChoice(
+ index=i,
+ message=ChatMessage(role="assistant", content=content),
+ # TODO: support other finish_reason
+ finish_reason="stop",
+ )
+ )
+
+ # TODO: support usage field
+ # "usage": {
+ # "prompt_tokens": 9,
+ # "completion_tokens": 12,
+ # "total_tokens": 21
+ # }
+ return ChatCompletionResponse(choices=choices)
+
+
+def generate_payload(
+ model_name: str,
+ messages: List[Dict[str, str]],
+ *,
+ temperature: float,
+ max_tokens: int,
+ stop: Union[str, None],
+):
+ is_chatglm = "chatglm" in model_name.lower()
+ # TODO(suquark): The template is currently a reference. Here we have to make a copy.
+ # We use create a template factory to avoid this.
+ conv = get_default_conv_template(model_name).copy()
+
+ # TODO(suquark): Conv.messages should be a list. But it is a tuple now.
+ # We should change it to a list.
+ conv.messages = list(conv.messages)
+
+ for message in messages:
+ msg_role = message["role"]
+ if msg_role == "system":
+ conv.system = message["content"]
+ elif msg_role == "user":
+ conv.append_message(conv.roles[0], message["content"])
+ elif msg_role == "assistant":
+ conv.append_message(conv.roles[1], message["content"])
+ else:
+ raise ValueError(f"Unknown role: {msg_role}")
+
+ # Add a blank message for the assistant.
+ conv.append_message(conv.roles[1], None)
+
+ if is_chatglm:
+ prompt = conv.messages[conv.offset :]
+ else:
+ prompt = conv.get_prompt()
+ skip_echo_len = compute_skip_echo_len(model_name, conv, prompt)
+
+ if stop is None:
+ stop = conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2
+
+ # TODO(suquark): We should get the default `max_new_tokens`` from the model.
+ if max_tokens is None:
+ max_tokens = 512
+
+ payload = {
+ "model": model_name,
+ "prompt": prompt,
+ "temperature": temperature,
+ "max_new_tokens": max_tokens,
+ "stop": stop,
+ }
+
+ logger.debug(f"==== request ====\n{payload}")
+ return payload, skip_echo_len
+
+
+async def chat_completion(model_name: str, payload: Dict[str, Any], skip_echo_len: int):
+ controller_url = app_settings.FASTCHAT_CONTROLLER_URL
+ async with httpx.AsyncClient() as client:
+ ret = await client.post(
+ controller_url + "/get_worker_address", json={"model": model_name}
+ )
+ worker_addr = ret.json()["address"]
+ # No available worker
+ if worker_addr == "":
+ raise ValueError(f"No available worker for {model_name}")
+
+ logger.debug(f"model_name: {model_name}, worker_addr: {worker_addr}")
+
+ output = ""
+ delimiter = b"\0"
+ async with client.stream(
+ "POST",
+ worker_addr + "/worker_generate_stream",
+ headers=headers,
+ json=payload,
+ timeout=20,
+ ) as response:
+ content = await response.aread()
+
+ for chunk in content.split(delimiter):
+ if not chunk:
+ continue
+ data = json.loads(chunk.decode())
+ if data["error_code"] == 0:
+ output = data["text"][skip_echo_len:].strip()
+
+ return output
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="FastChat ChatGPT-compatible Restful API server."
+ )
+ parser.add_argument("--host", type=str, default="localhost", help="host name")
+ parser.add_argument("--port", type=int, default=8000, help="port number")
+ parser.add_argument("--allow-credentials", action="store_true", help="allow credentials")
+ parser.add_argument("--allowed-origins", type=json.loads, default=["*"], help="allowed origins")
+ parser.add_argument("--allowed-methods", type=json.loads, default=["*"], help="allowed methods")
+ parser.add_argument("--allowed-headers", type=json.loads, default=["*"], help="allowed headers")
+
+ args = parser.parse_args()
+
+ app.add_middleware(
+ CORSMiddleware,
+ allow_origins=args.allowed_origins,
+ allow_credentials=args.allow_credentials,
+ allow_methods=args.allowed_methods,
+ allow_headers=args.allowed_headers,
+ )
+
+ logger.debug(f"==== args ====\n{args}")
+
+ uvicorn.run("fastchat.serve.api:app", host=args.host, port=args.port, reload=True)
diff --git a/model/fastchat/serve/cacheflow_worker.py b/model/fastchat/serve/cacheflow_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..f83b2598ec21a1966af182d9f4f5a879614bacd1
--- /dev/null
+++ b/model/fastchat/serve/cacheflow_worker.py
@@ -0,0 +1,330 @@
+"""
+A model worker executes the model based on Cacheflow.
+
+Install Cacheflow first. Then, assuming controller is live:
+1. ray start --head
+2. python3 -m fastchat.serve.cacheflow_worker --model-path path_to_vicuna
+
+launch Gradio:
+3. python3 -m fastchat.serve.gradio_web_server --concurrency-count 10000
+"""
+import argparse
+import asyncio
+import json
+import threading
+import time
+import uuid
+from typing import List, Dict
+
+import requests
+import torch
+import uvicorn
+from fastapi import FastAPI, Request, BackgroundTasks
+from fastapi.responses import StreamingResponse
+from transformers import AutoTokenizer
+
+from cacheflow.master.server import Server, initialize_ray_cluster
+from cacheflow.sampling_params import SamplingParams
+from cacheflow.sequence import Sequence, SequenceGroup
+from cacheflow.utils import Counter, get_gpu_memory, get_cpu_memory
+from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
+from fastchat.utils import build_logger, pretty_print_semaphore
+
+GB = 1 << 30
+TIMEOUT_TO_PREVENT_DEADLOCK = 1 # seconds
+
+worker_id = str(uuid.uuid4())[:6]
+logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
+global_counter = 0
+seed = torch.cuda.current_device()
+
+
+def heart_beat_worker(controller):
+ while True:
+ time.sleep(WORKER_HEART_BEAT_INTERVAL)
+ controller.send_heart_beat()
+
+
+class CacheFlowWorker:
+ def __init__(
+ self,
+ controller_addr,
+ worker_addr,
+ worker_id,
+ no_register,
+ model_path,
+ model_name,
+ block_size,
+ seed,
+ swap_space,
+ max_num_batched_tokens,
+ distributed_init_method,
+ all_stage_devices,
+ ):
+ self.controller_addr = controller_addr
+ self.worker_addr = worker_addr
+ self.worker_id = worker_id
+ if model_path.endswith("/"):
+ model_path = model_path[:-1]
+ self.model_name = model_name or model_path.split("/")[-1]
+
+ logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...")
+ self.block_size = block_size
+
+ # FIXME(Hao): we need to pass the tokenizer into cacheflow because we need
+ # to detect the stopping criteria "###".
+ self.tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+ self.seq_group_counter = Counter()
+ self.seq_counter = Counter()
+ # FIXME(Hao): hard code context len
+ self.context_len = 2048
+ # pipeline_parallel_size = 1,
+ # tensor_parallel_size = 1,
+ # dtype = torch.float16
+ remote_server_class = Server
+ self.server = remote_server_class(
+ model=self.model_name,
+ model_path=model_path,
+ pipeline_parallel_size=1,
+ tensor_parallel_size=1,
+ block_size=block_size,
+ dtype=torch.float16,
+ seed=seed,
+ swap_space=swap_space,
+ max_num_batched_tokens=max_num_batched_tokens,
+ num_nodes=1,
+ num_devices_per_node=4,
+ distributed_init_method=distributed_init_method,
+ all_stage_devices=all_stage_devices,
+ gpu_memory=get_gpu_memory(),
+ cpu_memory=get_cpu_memory(),
+ )
+ self.running_seq_groups: Dict[int, SequenceGroup] = {}
+ self.sequence_group_events: Dict[int, asyncio.Event] = {}
+ self.is_server_running = False
+
+ if not no_register:
+ self.register_to_controller()
+ self.heart_beat_thread = threading.Thread(
+ target=heart_beat_worker, args=(self,)
+ )
+ self.heart_beat_thread.start()
+
+ def register_to_controller(self):
+ logger.info("Register to controller")
+
+ url = self.controller_addr + "/register_worker"
+ data = {
+ "worker_name": self.worker_addr,
+ "check_heart_beat": True,
+ "worker_status": self.get_status(),
+ }
+ r = requests.post(url, json=data)
+ assert r.status_code == 200
+
+ def send_heart_beat(self):
+ logger.info(
+ f"Send heart beat. Models: {[self.model_name]}. "
+ f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
+ f"global_counter: {global_counter}"
+ )
+
+ url = self.controller_addr + "/receive_heart_beat"
+
+ while True:
+ try:
+ ret = requests.post(
+ url,
+ json={
+ "worker_name": self.worker_addr,
+ "queue_length": self.get_queue_length(),
+ },
+ timeout=5,
+ )
+ exist = ret.json()["exist"]
+ break
+ except requests.exceptions.RequestException as e:
+ logger.error(f"heart beat error: {e}")
+ time.sleep(5)
+
+ if not exist:
+ self.register_to_controller()
+
+ def get_queue_length(self):
+ if (
+ model_semaphore is None
+ or model_semaphore._value is None
+ or model_semaphore._waiters is None
+ ):
+ return 0
+ else:
+ return (
+ args.limit_model_concurrency
+ - model_semaphore._value
+ + len(model_semaphore._waiters)
+ )
+
+ def get_status(self):
+ return {
+ "model_names": [self.model_name],
+ "speed": 1,
+ "queue_length": self.get_queue_length(),
+ }
+
+ async def server_step(self):
+ self.is_server_running = True
+ updated_seq_groups = self.server.step()
+ self.is_server_running = False
+ # Notify the waiting coroutines that there new outputs ready.
+ for seq_group in updated_seq_groups:
+ group_id = seq_group.group_id
+ self.running_seq_groups[group_id] = seq_group
+ self.sequence_group_events[group_id].set()
+
+ async def generate_stream(self, params):
+ tokenizer = self.tokenizer
+ context = params["prompt"]
+ temperature = float(params.get("temperature", 1.0))
+ max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
+ stop_str = params.get("stop", None)
+
+ input_ids = tokenizer(context).input_ids
+ max_src_len = self.context_len - max_new_tokens - 8
+ input_ids = input_ids[-max_src_len:]
+
+ # make sampling params in cacheflow
+ sampling_params = SamplingParams.from_dict(params)
+ sampling_params.stop_token_ids.add(tokenizer.eos_token_id)
+ sampling_params.n = 1
+ sampling_params.max_num_steps = max_new_tokens
+ sampling_params.temperature = temperature
+ if stop_str is not None:
+ sampling_params.stop_str = stop_str
+ # we might sample multiple sequences, but in chatbot, this is one
+ seqs: List[Sequence] = []
+ for _ in range(sampling_params.n):
+ seq_id = next(self.seq_counter)
+ seq = Sequence(seq_id, input_ids, block_size=self.block_size)
+ seqs.append(seq)
+
+ arrival_time = time.time()
+ group_id = next(self.seq_group_counter)
+ # logger.info(f"Group {group_id} arrives at {time.time()}")
+ seq_group = SequenceGroup(group_id, seqs, arrival_time)
+ group_event = asyncio.Event()
+ self.running_seq_groups[group_id] = seq_group
+ self.sequence_group_events[group_id] = group_event
+ self.server.add_sequence_groups([(seq_group, sampling_params)])
+ while True:
+ if not self.is_server_running:
+ await self.server_step()
+ try:
+ await asyncio.wait_for(
+ group_event.wait(), timeout=TIMEOUT_TO_PREVENT_DEADLOCK
+ )
+ except:
+ pass
+ group_event.clear()
+ seq_group = self.running_seq_groups[group_id]
+ all_outputs = []
+ for seq in seq_group.seqs:
+ token_ids = seq.get_token_ids()
+ output = self.tokenizer.decode(token_ids, skip_special_tokens=True)
+ if stop_str is not None:
+ if output.endswith(stop_str):
+ output = output[: -len(stop_str)]
+ all_outputs.append(output)
+ assert len(seq_group.seqs) == 1
+ ret = {
+ "text": all_outputs[0],
+ "error_code": 0,
+ }
+ yield (json.dumps(ret) + "\0").encode("utf-8")
+ if seq_group.is_finished():
+ del self.running_seq_groups[group_id]
+ del self.sequence_group_events[group_id]
+ break
+
+
+app = FastAPI()
+model_semaphore = None
+
+
+def release_model_semaphore():
+ model_semaphore.release()
+
+
+@app.post("/worker_generate_stream")
+async def generate_stream(request: Request):
+ global model_semaphore, global_counter
+ global_counter += 1
+ params = await request.json()
+
+ if model_semaphore is None:
+ model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
+ await model_semaphore.acquire()
+ background_tasks = BackgroundTasks()
+ background_tasks.add_task(release_model_semaphore)
+ # return StreamingResponse(generator, background=background_tasks)
+ return StreamingResponse(
+ worker.generate_stream(params), background=background_tasks
+ )
+
+
+@app.post("/worker_get_status")
+async def get_status(request: Request):
+ return worker.get_status()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", type=str, default="localhost")
+ parser.add_argument("--port", type=int, default=21002)
+ parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
+ parser.add_argument(
+ "--controller-address", type=str, default="http://localhost:21001"
+ )
+ parser.add_argument(
+ "--model-path", type=str, default="/home/haozhang/weights/hf-llama-7b"
+ )
+ parser.add_argument("--model-name", type=str)
+ parser.add_argument("--limit-model-concurrency", type=int, default=1024)
+ parser.add_argument("--stream-interval", type=int, default=2)
+ parser.add_argument("--no-register", action="store_true")
+ # cacheflow specific params
+ parser.add_argument(
+ "--block-size", type=int, default=8, choices=[8, 16], help="token block size"
+ )
+ parser.add_argument(
+ "--swap-space", type=int, default=20, help="CPU swap space size (GiB) per GPU"
+ )
+ parser.add_argument(
+ "--max-num-batched-tokens",
+ type=int,
+ default=2560,
+ help="maximum number of batched tokens",
+ )
+ args = parser.parse_args()
+
+ (
+ num_nodes,
+ num_devices_per_node,
+ distributed_init_method,
+ all_stage_devices,
+ ) = initialize_ray_cluster(pipeline_parallel_size=1, tensor_parallel_size=1)
+
+ worker = CacheFlowWorker(
+ args.controller_address,
+ args.worker_address,
+ worker_id,
+ args.no_register,
+ args.model_path,
+ args.model_name,
+ args.block_size,
+ seed,
+ args.swap_space,
+ args.max_num_batched_tokens,
+ distributed_init_method,
+ all_stage_devices,
+ )
+ uvicorn.run(app, host=args.host, port=args.port, log_level="info")
diff --git a/model/fastchat/serve/cli.py b/model/fastchat/serve/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb4a485fc2dd2ab2605f5650cc08984912a3f3ce
--- /dev/null
+++ b/model/fastchat/serve/cli.py
@@ -0,0 +1,172 @@
+"""
+Chat with a model with command line interface.
+
+Usage:
+python3 -m fastchat.serve.cli --model ~/model_weights/llama-7b
+"""
+import argparse
+import os
+import re
+
+from prompt_toolkit import PromptSession
+from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
+from prompt_toolkit.completion import WordCompleter
+from prompt_toolkit.history import InMemoryHistory
+from rich.console import Console
+from rich.markdown import Markdown
+from rich.live import Live
+
+from fastchat.serve.inference import chat_loop, ChatIO
+
+
+class SimpleChatIO(ChatIO):
+ def prompt_for_input(self, role) -> str:
+ return input(f"{role}: ")
+
+ def prompt_for_output(self, role: str):
+ print(f"{role}: ", end="", flush=True)
+
+ def stream_output(self, output_stream, skip_echo_len: int):
+ pre = 0
+ for outputs in output_stream:
+ outputs = outputs[skip_echo_len:].strip()
+ outputs = outputs.split(" ")
+ now = len(outputs) - 1
+ if now > pre:
+ print(" ".join(outputs[pre:now]), end=" ", flush=True)
+ pre = now
+ print(" ".join(outputs[pre:]), flush=True)
+ return " ".join(outputs)
+
+
+class RichChatIO(ChatIO):
+ def __init__(self):
+ self._prompt_session = PromptSession(history=InMemoryHistory())
+ self._completer = WordCompleter(
+ words=["!exit", "!reset"], pattern=re.compile("$")
+ )
+ self._console = Console()
+
+ def prompt_for_input(self, role) -> str:
+ self._console.print(f"[bold]{role}:")
+ # TODO(suquark): multiline input has some issues. fix it later.
+ prompt_input = self._prompt_session.prompt(
+ completer=self._completer,
+ multiline=False,
+ auto_suggest=AutoSuggestFromHistory(),
+ key_bindings=None,
+ )
+ self._console.print()
+ return prompt_input
+
+ def prompt_for_output(self, role: str):
+ self._console.print(f"[bold]{role}:")
+
+ def stream_output(self, output_stream, skip_echo_len: int):
+ """Stream output from a role."""
+ # TODO(suquark): the console flickers when there is a code block
+ # above it. We need to cut off "live" when a code block is done.
+
+ # Create a Live context for updating the console output
+ with Live(console=self._console, refresh_per_second=4) as live:
+ # Read lines from the stream
+ for outputs in output_stream:
+ accumulated_text = outputs[skip_echo_len:]
+ if not accumulated_text:
+ continue
+ # Render the accumulated text as Markdown
+ # NOTE: this is a workaround for the rendering "unstandard markdown"
+ # in rich. The chatbots output treat "\n" as a new line for
+ # better compatibility with real-world text. However, rendering
+ # in markdown would break the format. It is because standard markdown
+ # treat a single "\n" in normal text as a space.
+ # Our workaround is adding two spaces at the end of each line.
+ # This is not a perfect solution, as it would
+ # introduce trailing spaces (only) in code block, but it works well
+ # especially for console output, because in general the console does not
+ # care about trailing spaces.
+ lines = []
+ for line in accumulated_text.splitlines():
+ lines.append(line)
+ if line.startswith("```"):
+ # Code block marker - do not add trailing spaces, as it would
+ # break the syntax highlighting
+ lines.append("\n")
+ else:
+ lines.append(" \n")
+ markdown = Markdown("".join(lines))
+ # Update the Live console output
+ live.update(markdown)
+ self._console.print()
+ return outputs[skip_echo_len:]
+
+
+def main(args):
+ if args.gpus:
+ if args.num_gpus and len(args.gpus.split(",")) < int(args.num_gpus):
+ raise ValueError(f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!")
+ os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
+ if args.style == "simple":
+ chatio = SimpleChatIO()
+ elif args.style == "rich":
+ chatio = RichChatIO()
+ else:
+ raise ValueError(f"Invalid style for console: {args.style}")
+ try:
+ chat_loop(
+ args.model_path,
+ args.device,
+ args.num_gpus,
+ args.max_gpu_memory,
+ args.load_8bit,
+ args.conv_template,
+ args.temperature,
+ args.max_new_tokens,
+ chatio,
+ args.debug,
+ )
+ except KeyboardInterrupt:
+ print("exit...")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model-path",
+ type=str,
+ default="facebook/opt-350m",
+ help="The path to the weights",
+ )
+ parser.add_argument(
+ "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda"
+ )
+ parser.add_argument(
+ "--gpus",
+ type=str,
+ default=None,
+ help="A single GPU like 1 or multiple GPUs like 0,2"
+ )
+ parser.add_argument("--num-gpus", type=str, default="1")
+ parser.add_argument(
+ "--max-gpu-memory",
+ type=str,
+ help="The maximum memory per gpu. Use a string like '13Gib'",
+ )
+ parser.add_argument(
+ "--load-8bit", action="store_true", help="Use 8-bit quantization."
+ )
+ parser.add_argument(
+ "--conv-template", type=str, default=None, help="Conversation prompt template."
+ )
+ parser.add_argument("--temperature", type=float, default=0.7)
+ parser.add_argument("--max-new-tokens", type=int, default=512)
+ parser.add_argument(
+ "--style",
+ type=str,
+ default="simple",
+ choices=["simple", "rich"],
+ help="Display style.",
+ )
+ parser.add_argument("--debug", action="store_true")
+ args = parser.parse_args()
+ main(args)
diff --git a/model/fastchat/serve/cli_caption.py b/model/fastchat/serve/cli_caption.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ef39879d49b04f60255f472cd26c3dda11c0f56
--- /dev/null
+++ b/model/fastchat/serve/cli_caption.py
@@ -0,0 +1,206 @@
+"""
+Chat with a model with command line interface.
+
+Usage:
+python3 -m fastchat.serve.cli --model ~/model_weights/llama-7b
+"""
+import argparse
+import os
+import re
+
+from prompt_toolkit import PromptSession
+from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
+from prompt_toolkit.completion import WordCompleter
+from prompt_toolkit.history import InMemoryHistory
+from rich.console import Console
+from rich.markdown import Markdown
+from rich.live import Live
+
+from fastchat.serve.inference import ChatIO, question_loop, answer_loop
+
+
+class SimpleChatIO(ChatIO):
+ def prompt_for_input(self, role) -> str:
+ return input(f"{role}: ")
+
+ def prompt_for_output(self, role: str):
+ print(f"{role}: ", end="", flush=True)
+
+ def stream_output(self, output_stream, skip_echo_len: int):
+ pre = 0
+ for outputs in output_stream:
+ outputs = outputs[skip_echo_len:].strip()
+ outputs = outputs.split(" ")
+ now = len(outputs) - 1
+ if now > pre:
+ print(" ".join(outputs[pre:now]), end=" ", flush=True)
+ pre = now
+ print(" ".join(outputs[pre:]), flush=True)
+ return " ".join(outputs)
+
+
+class RichChatIO(ChatIO):
+ def __init__(self):
+ self._prompt_session = PromptSession(history=InMemoryHistory())
+ self._completer = WordCompleter(
+ words=["!exit", "!reset"], pattern=re.compile("$")
+ )
+ self._console = Console()
+
+ def prompt_for_input(self, role) -> str:
+ self._console.print(f"[bold]{role}:")
+ # TODO(suquark): multiline input has some issues. fix it later.
+ prompt_input = self._prompt_session.prompt(
+ completer=self._completer,
+ multiline=False,
+ auto_suggest=AutoSuggestFromHistory(),
+ key_bindings=None,
+ )
+ self._console.print()
+ return prompt_input
+
+ def prompt_for_output(self, role: str):
+ self._console.print(f"[bold]{role}:")
+
+ def stream_output(self, output_stream, skip_echo_len: int):
+ """Stream output from a role."""
+ # TODO(suquark): the console flickers when there is a code block
+ # above it. We need to cut off "live" when a code block is done.
+
+ # Create a Live context for updating the console output
+ with Live(console=self._console, refresh_per_second=4) as live:
+ # Read lines from the stream
+ for outputs in output_stream:
+ accumulated_text = outputs[skip_echo_len:]
+ if not accumulated_text:
+ continue
+ # Render the accumulated text as Markdown
+ # NOTE: this is a workaround for the rendering "unstandard markdown"
+ # in rich. The chatbots output treat "\n" as a new line for
+ # better compatibility with real-world text. However, rendering
+ # in markdown would break the format. It is because standard markdown
+ # treat a single "\n" in normal text as a space.
+ # Our workaround is adding two spaces at the end of each line.
+ # This is not a perfect solution, as it would
+ # introduce trailing spaces (only) in code block, but it works well
+ # especially for console output, because in general the console does not
+ # care about trailing spaces.
+ lines = []
+ for line in accumulated_text.splitlines():
+ lines.append(line)
+ if line.startswith("```"):
+ # Code block marker - do not add trailing spaces, as it would
+ # break the syntax highlighting
+ lines.append("\n")
+ else:
+ lines.append(" \n")
+ markdown = Markdown("".join(lines))
+ # Update the Live console output
+ live.update(markdown)
+ self._console.print()
+ return outputs[skip_echo_len:]
+
+
+def main(args):
+ if args.gpus:
+ if args.num_gpus and len(args.gpus.split(",")) < int(args.num_gpus):
+ raise ValueError(f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!")
+ os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
+ if args.style == "simple":
+ chatio = SimpleChatIO()
+ elif args.style == "rich":
+ chatio = RichChatIO()
+ else:
+ raise ValueError(f"Invalid style for console: {args.style}")
+ try:
+ if args.answer_flag:
+ print("answer loop")
+ answer_loop(
+ args.model_path,
+ args.device,
+ args.num_gpus,
+ args.max_gpu_memory,
+ args.load_8bit,
+ args.conv_template,
+ args.temperature,
+ args.max_new_tokens,
+ chatio,
+ args.debug,
+ args.question_path,
+ args.caption_path,
+ args.data_info_path,
+ args.answer_path,
+ # args.caption_path
+ )
+ else:
+ print("question loop")
+ # detect if the caption.json is already there
+ if os.path.exists(args.caption_path):
+ print("caption.json already exists")
+ # exit(0)
+ question_loop(
+ args.model_path,
+ args.device,
+ args.num_gpus,
+ args.max_gpu_memory,
+ args.load_8bit,
+ args.conv_template,
+ args.temperature,
+ args.max_new_tokens,
+ chatio,
+ args.debug,
+ args.question_path,
+ args.caption_path,
+ # args.caption_path
+ )
+ except KeyboardInterrupt:
+ print("exit...")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model-path",
+ type=str,
+ default="facebook/opt-350m",
+ help="The path to the weights",
+ )
+ parser.add_argument(
+ "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda"
+ )
+ parser.add_argument(
+ "--gpus",
+ type=str,
+ default=None,
+ help="A single GPU like 1 or multiple GPUs like 0,2"
+ )
+ parser.add_argument("--num-gpus", type=str, default="1")
+ parser.add_argument(
+ "--max-gpu-memory",
+ type=str,
+ help="The maximum memory per gpu. Use a string like '13Gib'",
+ )
+ parser.add_argument(
+ "--load-8bit", action="store_true", help="Use 8-bit quantization."
+ )
+ parser.add_argument(
+ "--conv-template", type=str, default=None, help="Conversation prompt template."
+ )
+ parser.add_argument("--temperature", type=float, default=0.7)
+ parser.add_argument("--max-new-tokens", type=int, default=512)
+ parser.add_argument(
+ "--style",
+ type=str,
+ default="simple",
+ choices=["simple", "rich"],
+ help="Display style.",
+ )
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--question-path", type=str, default=None)
+ parser.add_argument("--caption-path", type=str, default=None)
+ parser.add_argument("--answer-flag", type=bool, default=False)
+ parser.add_argument("--data-info-path", type=str, default="../Test_frameqa_question-balanced.csv")
+ parser.add_argument("--answer-path", type=str, default="data_processed.json")
+ # parser.add_argument("--prompt-path", type=str, default=None)
+ args = parser.parse_args()
+ main(args)
diff --git a/model/fastchat/serve/compression.py b/model/fastchat/serve/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..be8363ac34c17b9b354d1daa616536ecfeaaa0ec
--- /dev/null
+++ b/model/fastchat/serve/compression.py
@@ -0,0 +1,138 @@
+import dataclasses
+
+import torch
+from torch import Tensor
+import torch.nn as nn
+from torch.nn import functional as F
+
+
+@dataclasses.dataclass
+class CompressionConfig:
+ """Group-wise quantization."""
+
+ num_bits: int
+ group_size: int
+ group_dim: int
+ symmetric: bool
+ enabled: bool = True
+
+
+default_compression_config = CompressionConfig(
+ num_bits=8, group_size=256, group_dim=1, symmetric=True, enabled=True
+)
+
+
+class CLinear(nn.Module):
+ """Compressed Linear Layer."""
+
+ def __init__(self, weight, bias, device):
+ super().__init__()
+
+ self.weight = compress(weight.data.to(device), default_compression_config)
+ self.bias = bias
+
+ def forward(self, input: Tensor) -> Tensor:
+ weight = decompress(self.weight, default_compression_config)
+ return F.linear(input, weight, self.bias)
+
+
+def compress_module(module, target_device):
+ for attr_str in dir(module):
+ target_attr = getattr(module, attr_str)
+ if type(target_attr) == torch.nn.Linear:
+ setattr(
+ module,
+ attr_str,
+ CLinear(target_attr.weight, target_attr.bias, target_device),
+ )
+ for name, child in module.named_children():
+ compress_module(child, target_device)
+
+
+def compress(tensor, config):
+ """Simulate group-wise quantization."""
+ if not config.enabled:
+ return tensor
+
+ group_size, num_bits, group_dim, symmetric = (
+ config.group_size,
+ config.num_bits,
+ config.group_dim,
+ config.symmetric,
+ )
+ assert num_bits <= 8
+
+ original_shape = tensor.shape
+ num_groups = (original_shape[group_dim] + group_size - 1) // group_size
+ new_shape = (
+ original_shape[:group_dim]
+ + (num_groups, group_size)
+ + original_shape[group_dim + 1 :]
+ )
+
+ # Pad
+ pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
+ if pad_len != 0:
+ pad_shape = (
+ original_shape[:group_dim] + (pad_len,) + original_shape[group_dim + 1 :]
+ )
+ tensor = torch.cat(
+ [tensor, torch.zeros(pad_shape, dtype=tensor.dtype, device=tensor.device)],
+ dim=group_dim,
+ )
+ data = tensor.view(new_shape)
+
+ # Quantize
+ if symmetric:
+ B = 2 ** (num_bits - 1) - 1
+ scale = B / torch.max(data.abs(), dim=group_dim + 1, keepdim=True)[0]
+ data = data * scale
+ data = data.clamp_(-B, B).round_().to(torch.int8)
+ return data, scale, original_shape
+ else:
+ B = 2**num_bits - 1
+ mn = torch.min(data, dim=group_dim + 1, keepdim=True)[0]
+ mx = torch.max(data, dim=group_dim + 1, keepdim=True)[0]
+
+ scale = B / (mx - mn)
+ data = data - mn
+ data.mul_(scale)
+
+ data = data.clamp_(0, B).round_().to(torch.uint8)
+ return data, mn, scale, original_shape
+
+
+def decompress(packed_data, config):
+ """Simulate group-wise dequantization."""
+ if not config.enabled:
+ return packed_data
+
+ group_size, num_bits, group_dim, symmetric = (
+ config.group_size,
+ config.num_bits,
+ config.group_dim,
+ config.symmetric,
+ )
+
+ # Dequantize
+ if symmetric:
+ data, scale, original_shape = packed_data
+ data = data / scale
+ else:
+ data, mn, scale, original_shape = packed_data
+ data = data / scale
+ data.add_(mn)
+
+ # Unpad
+ pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
+ if pad_len:
+ padded_original_shape = (
+ original_shape[:group_dim]
+ + (original_shape[group_dim] + pad_len,)
+ + original_shape[group_dim + 1 :]
+ )
+ data = data.reshape(padded_original_shape)
+ indices = [slice(0, x) for x in original_shape]
+ return data[indices].contiguous()
+ else:
+ return data.view(original_shape)
diff --git a/model/fastchat/serve/controller.py b/model/fastchat/serve/controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..d46eaa29b97a8f89b0fa74b2d8c920b58a65c62e
--- /dev/null
+++ b/model/fastchat/serve/controller.py
@@ -0,0 +1,310 @@
+"""
+A controller manages distributed workers.
+It sends worker addresses to clients.
+"""
+import argparse
+import asyncio
+import dataclasses
+from enum import Enum, auto
+import json
+import logging
+import time
+from typing import List, Union
+import threading
+
+from fastapi import FastAPI, Request
+from fastapi.responses import StreamingResponse
+import numpy as np
+import requests
+import uvicorn
+
+from fastchat.constants import CONTROLLER_HEART_BEAT_EXPIRATION
+from fastchat.utils import build_logger, server_error_msg
+
+
+logger = build_logger("controller", "controller.log")
+
+
+class DispatchMethod(Enum):
+ LOTTERY = auto()
+ SHORTEST_QUEUE = auto()
+
+ @classmethod
+ def from_str(cls, name):
+ if name == "lottery":
+ return cls.LOTTERY
+ elif name == "shortest_queue":
+ return cls.SHORTEST_QUEUE
+ else:
+ raise ValueError(f"Invalid dispatch method")
+
+
+@dataclasses.dataclass
+class WorkerInfo:
+ model_names: List[str]
+ speed: int
+ queue_length: int
+ check_heart_beat: bool
+ last_heart_beat: str
+
+
+def heart_beat_controller(controller):
+ while True:
+ time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
+ controller.remove_stable_workers_by_expiration()
+
+
+class Controller:
+ def __init__(self, dispatch_method: str):
+ # Dict[str -> WorkerInfo]
+ self.worker_info = {}
+ self.dispatch_method = DispatchMethod.from_str(dispatch_method)
+
+ self.heart_beat_thread = threading.Thread(
+ target=heart_beat_controller, args=(self,)
+ )
+ self.heart_beat_thread.start()
+
+ logger.info("Init controller")
+
+ def register_worker(
+ self, worker_name: str, check_heart_beat: bool, worker_status: dict
+ ):
+ if worker_name not in self.worker_info:
+ logger.info(f"Register a new worker: {worker_name}")
+ else:
+ logger.info(f"Register an existing worker: {worker_name}")
+
+ if not worker_status:
+ worker_status = self.get_worker_status(worker_name)
+ if not worker_status:
+ return False
+
+ self.worker_info[worker_name] = WorkerInfo(
+ worker_status["model_names"],
+ worker_status["speed"],
+ worker_status["queue_length"],
+ check_heart_beat,
+ time.time(),
+ )
+
+ logger.info(f"Register done: {worker_name}, {worker_status}")
+ return True
+
+ def get_worker_status(self, worker_name: str):
+ try:
+ r = requests.post(worker_name + "/worker_get_status", timeout=5)
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Get status fails: {worker_name}, {e}")
+ return None
+
+ if r.status_code != 200:
+ logger.error(f"Get status fails: {worker_name}, {r}")
+ return None
+
+ return r.json()
+
+ def remove_worker(self, worker_name: str):
+ del self.worker_info[worker_name]
+
+ def refresh_all_workers(self):
+ old_info = dict(self.worker_info)
+ self.worker_info = {}
+
+ for w_name, w_info in old_info.items():
+ if not self.register_worker(w_name, w_info.check_heart_beat, None):
+ logger.info(f"Remove stale worker: {w_name}")
+
+ def list_models(self):
+ model_names = set()
+
+ for w_name, w_info in self.worker_info.items():
+ model_names.update(w_info.model_names)
+
+ return list(model_names)
+
+ def get_worker_address(self, model_name: str):
+ if self.dispatch_method == DispatchMethod.LOTTERY:
+ worker_names = []
+ worker_speeds = []
+ for w_name, w_info in self.worker_info.items():
+ if model_name in w_info.model_names:
+ worker_names.append(w_name)
+ worker_speeds.append(w_info.speed)
+ worker_speeds = np.array(worker_speeds, dtype=np.float32)
+ norm = np.sum(worker_speeds)
+ if norm < 1e-4:
+ return ""
+ worker_speeds = worker_speeds / norm
+ if True: # Directly return address
+ pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
+ worker_name = worker_names[pt]
+ return worker_name
+
+ # Check status before returning
+ while True:
+ pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
+ worker_name = worker_names[pt]
+
+ if self.get_worker_status(worker_name):
+ break
+ else:
+ self.remove_worker(worker_name)
+ worker_speeds[pt] = 0
+ norm = np.sum(worker_speeds)
+ if norm < 1e-4:
+ return ""
+ worker_speeds = worker_speeds / norm
+ continue
+ return worker_name
+ elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
+ worker_names = []
+ worker_qlen = []
+ for w_name, w_info in self.worker_info.items():
+ if model_name in w_info.model_names:
+ worker_names.append(w_name)
+ worker_qlen.append(w_info.queue_length / w_info.speed)
+ if len(worker_names) == 0:
+ return ""
+ min_index = np.argmin(worker_qlen)
+ w_name = worker_names[min_index]
+ self.worker_info[w_name].queue_length += 1
+ logger.info(
+ f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}"
+ )
+ return w_name
+ else:
+ raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")
+
+ def receive_heart_beat(self, worker_name: str, queue_length: int):
+ if worker_name not in self.worker_info:
+ logger.info(f"Receive unknown heart beat. {worker_name}")
+ return False
+
+ self.worker_info[worker_name].queue_length = queue_length
+ self.worker_info[worker_name].last_heart_beat = time.time()
+ logger.info(f"Receive heart beat. {worker_name}")
+ return True
+
+ def remove_stable_workers_by_expiration(self):
+ expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
+ to_delete = []
+ for worker_name, w_info in self.worker_info.items():
+ if w_info.check_heart_beat and w_info.last_heart_beat < expire:
+ to_delete.append(worker_name)
+
+ for worker_name in to_delete:
+ self.remove_worker(worker_name)
+
+ def worker_api_generate_stream(self, params):
+ worker_addr = self.get_worker_address(params["model"])
+ if not worker_addr:
+ logger.info(f"no worker: {params['model']}")
+ ret = {
+ "text": server_error_msg,
+ "error_code": 2,
+ }
+ yield json.dumps(ret).encode() + b"\0"
+
+ try:
+ response = requests.post(
+ worker_addr + "/worker_generate_stream",
+ json=params,
+ stream=True,
+ timeout=15,
+ )
+ for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
+ if chunk:
+ yield chunk + b"\0"
+ except requests.exceptions.RequestException as e:
+ logger.info(f"worker timeout: {worker_addr}")
+ ret = {
+ "text": server_error_msg,
+ "error_code": 3,
+ }
+ yield json.dumps(ret).encode() + b"\0"
+
+ # Let the controller act as a worker to achieve hierarchical
+ # management. This can be used to connect isolated sub networks.
+ def worker_api_get_status(self):
+ model_names = set()
+ speed = 0
+ queue_length = 0
+
+ for w_name in self.worker_info:
+ worker_status = self.get_worker_status(w_name)
+ if worker_status is not None:
+ model_names.update(worker_status["model_names"])
+ speed += worker_status["speed"]
+ queue_length += worker_status["queue_length"]
+
+ return {
+ "model_names": list(model_names),
+ "speed": speed,
+ "queue_length": queue_length,
+ }
+
+
+app = FastAPI()
+
+
+@app.post("/register_worker")
+async def register_worker(request: Request):
+ data = await request.json()
+ controller.register_worker(
+ data["worker_name"], data["check_heart_beat"], data.get("worker_status", None)
+ )
+
+
+@app.post("/refresh_all_workers")
+async def refresh_all_workers():
+ models = controller.refresh_all_workers()
+
+
+@app.post("/list_models")
+async def list_models():
+ models = controller.list_models()
+ return {"models": models}
+
+
+@app.post("/get_worker_address")
+async def get_worker_address(request: Request):
+ data = await request.json()
+ addr = controller.get_worker_address(data["model"])
+ return {"address": addr}
+
+
+@app.post("/receive_heart_beat")
+async def receive_heart_beat(request: Request):
+ data = await request.json()
+ exist = controller.receive_heart_beat(data["worker_name"], data["queue_length"])
+ return {"exist": exist}
+
+
+@app.post("/worker_generate_stream")
+async def worker_api_generate_stream(request: Request):
+ params = await request.json()
+ generator = controller.worker_api_generate_stream(params)
+ return StreamingResponse(generator)
+
+
+@app.post("/worker_get_status")
+async def worker_api_get_status(request: Request):
+ return controller.worker_api_get_status()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", type=str, default="localhost")
+ parser.add_argument("--port", type=int, default=21001)
+ parser.add_argument(
+ "--dispatch-method",
+ type=str,
+ choices=["lottery", "shortest_queue"],
+ default="shortest_queue",
+ )
+ args = parser.parse_args()
+ logger.info(f"args: {args}")
+
+ controller = Controller(args.dispatch_method)
+ uvicorn.run(app, host=args.host, port=args.port, log_level="info")
diff --git a/model/fastchat/serve/gateway/README.md b/model/fastchat/serve/gateway/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b3afaf171bc38b232b68609585244c9e76489da7
--- /dev/null
+++ b/model/fastchat/serve/gateway/README.md
@@ -0,0 +1,57 @@
+# fastchat Nginx Gateway
+
+## Purpose of the Gateway
+
+The Nginx gateway serves the following purposes:
+
+1. Protects Gradio servers by acting as a firewall.
+2. Facilitates dynamic mounting and unmounting of Gradio servers.
+3. Provides load balancing for Gradio servers.
+4. Offers additional security features, such as total connection limit.
+5. Reduces attack surface by requiring only a single public port to be exposed for serving.
+
+## Deployment and Updating of the Gateway
+
+### Installing Nginx
+
+On Debian-based distributions (e.g., Ubuntu):
+
+```bash
+sudo apt update
+sudo apt install nginx
+```
+On Red Hat-based distributions (e.g., CentOS, Fedora):
+
+```bash
+sudo yum install epel-release
+sudo yum install nginx
+```
+
+### Deployment
+
+Copy `nginx.conf` to `/etc/nginx/nginx.conf` (need sudo permission).
+
+Replace the port number 7860 in `server localhost:7860` with the port where you deploy the Gradio web server.
+
+Modify `upstream websocket` to configure Gradio servers behind the gateway.
+
+Lastly, update Nginx.
+
+
+### HTTPS Deployment with a Public Domain URL
+
+Make sure you obtain the HTTPS certificate and the private key used to generate the certificate.
+
+Fill the addresses to your certificate and private key in the `[PATH_TO_SSL_CERT]` and `[PATH_TO_PRIVATE_KEY]` fields.
+
+If you have your own domain url to serve the chatbot, replace the chat.lmsys.org url with your own domain url.
+
+### Updating
+
+Every time when `/etc/nginx/nginx.conf` is modified, you need to update the Nginx service:
+
+```bash
+sudo nginx -t # check `/etc/nginx/nginx.conf`
+sudo systemctl reload nginx # restart Nginx service to load the new config
+sudo systemctl status nginx # check the status of the Nginx service. It should be active (running).
+```
diff --git a/model/fastchat/serve/gateway/nginx.conf b/model/fastchat/serve/gateway/nginx.conf
new file mode 100644
index 0000000000000000000000000000000000000000..b88ca8c50772421fca91f33ff77ef75f4d23ad4d
--- /dev/null
+++ b/model/fastchat/serve/gateway/nginx.conf
@@ -0,0 +1,97 @@
+user www-data;
+worker_processes auto;
+pid /run/nginx.pid;
+include /etc/nginx/modules-enabled/*.conf;
+
+events {
+ worker_connections 1024; # maximum number of connections that a worker process can handle concurrently
+ # multi_accept on; # enabling multi_accept can help improve performance under high load, but may increase the number of simultaneous connections that a worker process can handle
+
+}
+
+http {
+ ##
+ # Basic Settings
+ ##
+
+ sendfile on; # enable sendfile for performance optimization
+ tcp_nopush on; # enable TCP no-pushing
+ tcp_nodelay on; # enable TCP no-delay
+ keepalive_timeout 65; # sets the timeout for keep-alive connections
+ types_hash_max_size 2048; # maximum size of the types hash table
+ # server_tokens off; # disable server token (i.e., server signature) in response headers to improve security
+
+ # server_names_hash_bucket_size 64;
+ # server_name_in_redirect off;
+
+ include /etc/nginx/mime.types; # include MIME types file
+ default_type application/octet-stream; # default MIME type for unknown file types
+
+ ##
+ # SSL Settings
+ ##
+
+ ssl_protocols TLSv1.2; # specify SSL/TLS protocols to use
+ ssl_prefer_server_ciphers on; # prefer server ciphers over client ciphers
+
+ ##
+ # Logging Settings
+ ##
+
+ access_log /var/log/nginx/access.log; # path to access log file
+ error_log /var/log/nginx/error.log; # path to error log file
+
+ ##
+ # Gzip Settings
+ ##
+ gzip on; # enable Gzip compression
+
+ ##
+ # Virtual Host Configs
+ ##
+
+ include /etc/nginx/conf.d/*.conf; # include all configuration files in conf.d directory
+ include /etc/nginx/sites-enabled/*; # include all enabled sites configuration files
+
+ # WebSocket Proxy: https://www.nginx.com/blog/websocket-nginx/
+ map $http_upgrade $connection_upgrade {
+ default upgrade;
+ '' close;
+ }
+
+ upstream websocket {
+ ip_hash; # load balancing by IP to guarantee session persistence
+ server localhost:7860; # The port should be the gradio web server port
+ # server localhost:7861; # extra gradio server if more than one
+ }
+
+ limit_conn_status 429;
+ limit_conn_zone $binary_remote_addr zone=perip:10m; # limit number of connections per IP
+ limit_conn_zone $server_name zone=perserver:10m; # limit number of connections per server
+
+ server {
+ listen 443 ssl; # the listening port of our server
+ ssl_certificate [PATH_TO_SSL_CERT];
+ ssl_certificate_key [PATH_TO_PRIVATE_KEY];
+ server_name chat.lmsys.org; # replace the url with your own domain url
+ limit_conn perserver 1024; # connections per server
+ location / {
+ proxy_pass http://websocket; # proxy all requests to the defined upstream server
+ limit_conn perip 5; # connections per IP
+ proxy_set_header Host $host; # set the Host header for the upstream server
+ proxy_set_header X-Real-IP $remote_addr; # set the client IP address as the real IP for the upstream server
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; # set the client IP addresses in the X-Forwarded-For header
+ proxy_http_version 1.1; # use HTTP version 1.1 for upstream communication
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "Upgrade"; # set the Connection header to Upgrade to enable WebSocket communication
+ }
+ }
+
+ # the following block routes all HTTP traffic to HTTPS via nginx
+ server {
+ listen 80;
+ server_name chat.lmsys.org;
+ return 301 https://chat.lmsys.org$request_uri;
+ }
+
+}
diff --git a/model/fastchat/serve/gradio_css.py b/model/fastchat/serve/gradio_css.py
new file mode 100644
index 0000000000000000000000000000000000000000..71d79b4a4b5a7ad84b8822d99e1740e77bc1f7a8
--- /dev/null
+++ b/model/fastchat/serve/gradio_css.py
@@ -0,0 +1,71 @@
+code_highlight_css = """
+#chatbot .hll { background-color: #ffffcc }
+#chatbot .c { color: #408080; font-style: italic }
+#chatbot .err { border: 1px solid #FF0000 }
+#chatbot .k { color: #008000; font-weight: bold }
+#chatbot .o { color: #666666 }
+#chatbot .ch { color: #408080; font-style: italic }
+#chatbot .cm { color: #408080; font-style: italic }
+#chatbot .cp { color: #BC7A00 }
+#chatbot .cpf { color: #408080; font-style: italic }
+#chatbot .c1 { color: #408080; font-style: italic }
+#chatbot .cs { color: #408080; font-style: italic }
+#chatbot .gd { color: #A00000 }
+#chatbot .ge { font-style: italic }
+#chatbot .gr { color: #FF0000 }
+#chatbot .gh { color: #000080; font-weight: bold }
+#chatbot .gi { color: #00A000 }
+#chatbot .go { color: #888888 }
+#chatbot .gp { color: #000080; font-weight: bold }
+#chatbot .gs { font-weight: bold }
+#chatbot .gu { color: #800080; font-weight: bold }
+#chatbot .gt { color: #0044DD }
+#chatbot .kc { color: #008000; font-weight: bold }
+#chatbot .kd { color: #008000; font-weight: bold }
+#chatbot .kn { color: #008000; font-weight: bold }
+#chatbot .kp { color: #008000 }
+#chatbot .kr { color: #008000; font-weight: bold }
+#chatbot .kt { color: #B00040 }
+#chatbot .m { color: #666666 }
+#chatbot .s { color: #BA2121 }
+#chatbot .na { color: #7D9029 }
+#chatbot .nb { color: #008000 }
+#chatbot .nc { color: #0000FF; font-weight: bold }
+#chatbot .no { color: #880000 }
+#chatbot .nd { color: #AA22FF }
+#chatbot .ni { color: #999999; font-weight: bold }
+#chatbot .ne { color: #D2413A; font-weight: bold }
+#chatbot .nf { color: #0000FF }
+#chatbot .nl { color: #A0A000 }
+#chatbot .nn { color: #0000FF; font-weight: bold }
+#chatbot .nt { color: #008000; font-weight: bold }
+#chatbot .nv { color: #19177C }
+#chatbot .ow { color: #AA22FF; font-weight: bold }
+#chatbot .w { color: #bbbbbb }
+#chatbot .mb { color: #666666 }
+#chatbot .mf { color: #666666 }
+#chatbot .mh { color: #666666 }
+#chatbot .mi { color: #666666 }
+#chatbot .mo { color: #666666 }
+#chatbot .sa { color: #BA2121 }
+#chatbot .sb { color: #BA2121 }
+#chatbot .sc { color: #BA2121 }
+#chatbot .dl { color: #BA2121 }
+#chatbot .sd { color: #BA2121; font-style: italic }
+#chatbot .s2 { color: #BA2121 }
+#chatbot .se { color: #BB6622; font-weight: bold }
+#chatbot .sh { color: #BA2121 }
+#chatbot .si { color: #BB6688; font-weight: bold }
+#chatbot .sx { color: #008000 }
+#chatbot .sr { color: #BB6688 }
+#chatbot .s1 { color: #BA2121 }
+#chatbot .ss { color: #19177C }
+#chatbot .bp { color: #008000 }
+#chatbot .fm { color: #0000FF }
+#chatbot .vc { color: #19177C }
+#chatbot .vg { color: #19177C }
+#chatbot .vi { color: #19177C }
+#chatbot .vm { color: #19177C }
+#chatbot .il { color: #666666 }
+"""
+# .highlight { background: #f8f8f8; }
diff --git a/model/fastchat/serve/gradio_patch.py b/model/fastchat/serve/gradio_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..af8731da17d4c39a2a32afd4ce2cca13e3845ac4
--- /dev/null
+++ b/model/fastchat/serve/gradio_patch.py
@@ -0,0 +1,168 @@
+"""
+Adopted from https://github.com/gradio-app/gradio/blob/main/gradio/components.py
+Fix a markdown render problem.
+"""
+from __future__ import annotations
+
+from gradio.components import *
+from markdown2 import Markdown
+import nh3
+
+
+class _Keywords(Enum):
+ NO_VALUE = "NO_VALUE" # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()`
+ FINISHED_ITERATING = "FINISHED_ITERATING" # Used to skip processing of a component's value (needed for generators + state)
+
+
+@document("style")
+class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
+ """
+ Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
+ Preprocessing: this component does *not* accept input.
+ Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.
+
+ Demos: chatbot_simple, chatbot_multimodal
+ """
+
+ def __init__(
+ self,
+ value: List[Tuple[str | None, str | None]] | Callable | None = None,
+ color_map: Dict[str, str] | None = None, # Parameter moved to Chatbot.style()
+ *,
+ label: str | None = None,
+ every: float | None = None,
+ show_label: bool = True,
+ visible: bool = True,
+ elem_id: str | None = None,
+ elem_classes: List[str] | str | None = None,
+ **kwargs,
+ ):
+ """
+ Parameters:
+ value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
+ label: component name in interface.
+ every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
+ show_label: if True, will display label.
+ visible: If False, component will be hidden.
+ elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
+ elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
+ """
+ if color_map is not None:
+ warnings.warn(
+ "The 'color_map' parameter has been deprecated.",
+ )
+ # self.md = utils.get_markdown_parser()
+ self.md = Markdown(extras=["fenced-code-blocks", "tables", "break-on-newline"])
+ self.select: EventListenerMethod
+ """
+ Event listener for when the user selects message from Chatbot.
+ Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.
+ See EventData documentation on how to use this event data.
+ """
+
+ IOComponent.__init__(
+ self,
+ label=label,
+ every=every,
+ show_label=show_label,
+ visible=visible,
+ elem_id=elem_id,
+ elem_classes=elem_classes,
+ value=value,
+ **kwargs,
+ )
+
+ def get_config(self):
+ return {
+ "value": self.value,
+ "selectable": self.selectable,
+ **IOComponent.get_config(self),
+ }
+
+ @staticmethod
+ def update(
+ value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
+ label: str | None = None,
+ show_label: bool | None = None,
+ visible: bool | None = None,
+ ):
+ updated_config = {
+ "label": label,
+ "show_label": show_label,
+ "visible": visible,
+ "value": value,
+ "__type__": "update",
+ }
+ return updated_config
+
+ def _process_chat_messages(
+ self, chat_message: str | Tuple | List | Dict | None
+ ) -> str | Dict | None:
+ if chat_message is None:
+ return None
+ elif isinstance(chat_message, (tuple, list)):
+ mime_type = processing_utils.get_mimetype(chat_message[0])
+ return {
+ "name": chat_message[0],
+ "mime_type": mime_type,
+ "alt_text": chat_message[1] if len(chat_message) > 1 else None,
+ "data": None, # These last two fields are filled in by the frontend
+ "is_file": True,
+ }
+ elif isinstance(
+ chat_message, dict
+ ): # This happens for previously processed messages
+ return chat_message
+ elif isinstance(chat_message, str):
+ # return self.md.render(chat_message)
+ return str(self.md.convert(chat_message))
+ else:
+ raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
+
+ def postprocess(
+ self,
+ y: List[
+ Tuple[str | Tuple | List | Dict | None, str | Tuple | List | Dict | None]
+ ],
+ ) -> List[Tuple[str | Dict | None, str | Dict | None]]:
+ """
+ Parameters:
+ y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
+ Returns:
+ List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.
+ """
+ if y is None:
+ return []
+ processed_messages = []
+ for message_pair in y:
+ assert isinstance(
+ message_pair, (tuple, list)
+ ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
+ assert (
+ len(message_pair) == 2
+ ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
+ processed_messages.append(
+ (
+ # self._process_chat_messages(message_pair[0]),
+ ''
+ + nh3.clean(message_pair[0])
+ + " ",
+ self._process_chat_messages(message_pair[1]),
+ )
+ )
+ return processed_messages
+
+ def style(self, height: int | None = None, **kwargs):
+ """
+ This method can be used to change the appearance of the Chatbot component.
+ """
+ if height is not None:
+ self._style["height"] = height
+ if kwargs.get("color_map") is not None:
+ warnings.warn("The 'color_map' parameter has been deprecated.")
+
+ Component.style(
+ self,
+ **kwargs,
+ )
+ return self
diff --git a/model/fastchat/serve/gradio_web_server.py b/model/fastchat/serve/gradio_web_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..347602b32e93baf375c7237d09f4072701fa9631
--- /dev/null
+++ b/model/fastchat/serve/gradio_web_server.py
@@ -0,0 +1,509 @@
+import argparse
+from collections import defaultdict
+import datetime
+import json
+import os
+import time
+import uuid
+
+import gradio as gr
+import requests
+
+from fastchat.conversation import (
+ get_default_conv_template,
+ compute_skip_echo_len,
+ SeparatorStyle,
+)
+from fastchat.constants import LOGDIR
+from fastchat.utils import (
+ build_logger,
+ server_error_msg,
+ violates_moderation,
+ moderation_msg,
+)
+from fastchat.serve.gradio_patch import Chatbot as grChatbot
+from fastchat.serve.gradio_css import code_highlight_css
+
+
+logger = build_logger("gradio_web_server", "gradio_web_server.log")
+
+headers = {"User-Agent": "fastchat Client"}
+
+no_change_btn = gr.Button.update()
+enable_btn = gr.Button.update(interactive=True)
+disable_btn = gr.Button.update(interactive=False)
+
+controller_url = None
+enable_moderation = False
+models = []
+
+priority = {
+ "vicuna-13b": "aaa",
+ "koala-13b": "aab",
+ "oasst-pythia-12b": "aac",
+ "dolly-v2-12b": "aad",
+ "chatglm-6b": "aae",
+ "stablelm-tuned-alpha-7b": "aaf",
+}
+
+
+def set_global_vars(controller_url_, enable_moderation_, models_):
+ global controller_url, enable_moderation, models
+ controller_url = controller_url_
+ enable_moderation = enable_moderation_
+ models = models_
+
+
+def get_conv_log_filename():
+ t = datetime.datetime.now()
+ name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
+ return name
+
+
+def get_model_list(controller_url):
+ ret = requests.post(controller_url + "/refresh_all_workers")
+ assert ret.status_code == 200
+ ret = requests.post(controller_url + "/list_models")
+ models = ret.json()["models"]
+ models.sort(key=lambda x: priority.get(x, x))
+ logger.info(f"Models: {models}")
+ return models
+
+
+get_window_url_params = """
+function() {
+ const params = new URLSearchParams(window.location.search);
+ url_params = Object.fromEntries(params);
+ console.log("url_params", url_params);
+ return url_params;
+ }
+"""
+
+
+def load_demo_single(url_params):
+ dropdown_update = gr.Dropdown.update(visible=True)
+ if "model" in url_params:
+ model = url_params["model"]
+ if model in models:
+ dropdown_update = gr.Dropdown.update(value=model, visible=True)
+
+ state = None
+ return (
+ state,
+ dropdown_update,
+ gr.Chatbot.update(visible=True),
+ gr.Textbox.update(visible=True),
+ gr.Button.update(visible=True),
+ gr.Row.update(visible=True),
+ gr.Accordion.update(visible=True),
+ )
+
+
+def load_demo(url_params, request: gr.Request):
+ logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
+ return load_demo_single(url_params)
+
+
+def vote_last_response(state, vote_type, model_selector, request: gr.Request):
+ with open(get_conv_log_filename(), "a") as fout:
+ data = {
+ "tstamp": round(time.time(), 4),
+ "type": vote_type,
+ "model": model_selector,
+ "state": state.dict(),
+ "ip": request.client.host,
+ }
+ fout.write(json.dumps(data) + "\n")
+
+
+def upvote_last_response(state, model_selector, request: gr.Request):
+ logger.info(f"upvote. ip: {request.client.host}")
+ vote_last_response(state, "upvote", model_selector, request)
+ return ("",) + (disable_btn,) * 3
+
+
+def downvote_last_response(state, model_selector, request: gr.Request):
+ logger.info(f"downvote. ip: {request.client.host}")
+ vote_last_response(state, "downvote", model_selector, request)
+ return ("",) + (disable_btn,) * 3
+
+
+def flag_last_response(state, model_selector, request: gr.Request):
+ logger.info(f"flag. ip: {request.client.host}")
+ vote_last_response(state, "flag", model_selector, request)
+ return ("",) + (disable_btn,) * 3
+
+
+def regenerate(state, request: gr.Request):
+ logger.info(f"regenerate. ip: {request.client.host}")
+ state.messages[-1][-1] = None
+ state.skip_next = False
+ return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5
+
+
+def clear_history(request: gr.Request):
+ logger.info(f"clear_history. ip: {request.client.host}")
+ state = None
+ return (state, [], "") + (disable_btn,) * 5
+
+
+def add_text(state, text, request: gr.Request):
+ logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
+
+ if state is None:
+ state = get_default_conv_template("vicuna").copy()
+
+ if len(text) <= 0:
+ state.skip_next = True
+ return (state, state.to_gradio_chatbot(), "") + (no_change_btn,) * 5
+ if enable_moderation:
+ flagged = violates_moderation(text)
+ if flagged:
+ logger.info(f"violate moderation. ip: {request.client.host}. text: {text}")
+ state.skip_next = True
+ return (state, state.to_gradio_chatbot(), moderation_msg) + (
+ no_change_btn,
+ ) * 5
+
+ text = text[:1536] # Hard cut-off
+ state.append_message(state.roles[0], text)
+ state.append_message(state.roles[1], None)
+ state.skip_next = False
+ return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5
+
+
+def post_process_code(code):
+ sep = "\n```"
+ if sep in code:
+ blocks = code.split(sep)
+ if len(blocks) % 2 == 1:
+ for i in range(1, len(blocks), 2):
+ blocks[i] = blocks[i].replace("\\_", "_")
+ code = sep.join(blocks)
+ return code
+
+
+def http_bot(state, model_selector, temperature, max_new_tokens, request: gr.Request):
+ logger.info(f"http_bot. ip: {request.client.host}")
+ start_tstamp = time.time()
+ model_name = model_selector
+ temperature = float(temperature)
+ max_new_tokens = int(max_new_tokens)
+
+ if state.skip_next:
+ # This generate call is skipped due to invalid inputs
+ yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
+ return
+
+ if len(state.messages) == state.offset + 2:
+ # First round of conversation
+ new_state = get_default_conv_template(model_name).copy()
+ new_state.conv_id = uuid.uuid4().hex
+ new_state.append_message(new_state.roles[0], state.messages[-2][1])
+ new_state.append_message(new_state.roles[1], None)
+ state = new_state
+
+ # Query worker address
+ ret = requests.post(
+ controller_url + "/get_worker_address", json={"model": model_name}
+ )
+ worker_addr = ret.json()["address"]
+ logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")
+
+ # No available worker
+ if worker_addr == "":
+ state.messages[-1][-1] = server_error_msg
+ yield (
+ state,
+ state.to_gradio_chatbot(),
+ disable_btn,
+ disable_btn,
+ disable_btn,
+ enable_btn,
+ enable_btn,
+ )
+ return
+
+ # Construct prompt
+ if "chatglm" in model_name:
+ prompt = state.messages[state.offset :]
+ else:
+ prompt = state.get_prompt()
+ skip_echo_len = compute_skip_echo_len(model_name, state, prompt)
+
+ # Make requests
+ pload = {
+ "model": model_name,
+ "prompt": prompt,
+ "temperature": temperature,
+ "max_new_tokens": max_new_tokens,
+ "stop": state.sep if state.sep_style == SeparatorStyle.SINGLE else None,
+ }
+ logger.info(f"==== request ====\n{pload}")
+
+ state.messages[-1][-1] = "▌"
+ yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
+
+ try:
+ # Stream output
+ response = requests.post(
+ worker_addr + "/worker_generate_stream",
+ headers=headers,
+ json=pload,
+ stream=True,
+ timeout=20,
+ )
+ for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
+ if chunk:
+ data = json.loads(chunk.decode())
+ if data["error_code"] == 0:
+ output = data["text"][skip_echo_len:].strip()
+ output = post_process_code(output)
+ state.messages[-1][-1] = output + "▌"
+ yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
+ else:
+ output = data["text"] + f" (error_code: {data['error_code']})"
+ state.messages[-1][-1] = output
+ yield (state, state.to_gradio_chatbot()) + (
+ disable_btn,
+ disable_btn,
+ disable_btn,
+ enable_btn,
+ enable_btn,
+ )
+ return
+ time.sleep(0.02)
+ except requests.exceptions.RequestException as e:
+ state.messages[-1][-1] = server_error_msg + f" (error_code: 4)"
+ yield (state, state.to_gradio_chatbot()) + (
+ disable_btn,
+ disable_btn,
+ disable_btn,
+ enable_btn,
+ enable_btn,
+ )
+ return
+
+ state.messages[-1][-1] = state.messages[-1][-1][:-1]
+ yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
+
+ finish_tstamp = time.time()
+ logger.info(f"{output}")
+
+ with open(get_conv_log_filename(), "a") as fout:
+ data = {
+ "tstamp": round(finish_tstamp, 4),
+ "type": "chat",
+ "model": model_name,
+ "gen_params": {
+ "temperature": temperature,
+ "max_new_tokens": max_new_tokens,
+ },
+ "start": round(start_tstamp, 4),
+ "finish": round(start_tstamp, 4),
+ "state": state.dict(),
+ "ip": request.client.host,
+ }
+ fout.write(json.dumps(data) + "\n")
+
+
+block_css = (
+ code_highlight_css
+ + """
+pre {
+ white-space: pre-wrap; /* Since CSS 2.1 */
+ white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
+ white-space: -pre-wrap; /* Opera 4-6 */
+ white-space: -o-pre-wrap; /* Opera 7 */
+ word-wrap: break-word; /* Internet Explorer 5.5+ */
+}
+#notice_markdown th {
+ display: none;
+}
+"""
+)
+
+
+def build_single_model_ui():
+ notice_markdown = """
+# 🏔️ Chat with Open Large Language Models
+- Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90% ChatGPT Quality. [[Blog post]](https://vicuna.lmsys.org) [[Evaluation]](https://vicuna.lmsys.org/eval/)
+- Koala: A Dialogue Model for Academic Research. [[Blog post]](https://bair.berkeley.edu/blog/2023/04/03/koala/)
+- [[GitHub]](https://github.com/lm-sys/FastChat) [[Twitter]](https://twitter.com/lmsysorg) [[Discord]](https://discord.gg/h6kCZb72G7)
+
+### Terms of use
+By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data for future research.**
+
+### Choose a model to chat with
+| | |
+| ---- | ---- |
+| [Vicuna](https://vicuna.lmsys.org): a chat assistant fine-tuned from LLaMA on user-shared conversations by LMSYS. | [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/): a dialogue model for academic research by BAIR |
+| [OpenAssistant (oasst)](https://open-assistant.io/): a chat-based assistant for everyone by LAION. | [Dolly](https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm): an instruction-tuned open large language model by Databricks. |
+| [ChatGLM](https://chatglm.cn/blog): an open bilingual dialogue language model by Tsinghua University | [StableLM](https://github.com/stability-AI/stableLM/): Stability AI language models. |
+| [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html): a model fine-tuned from LLaMA on instruction-following demonstrations by Stanford. | [LLaMA](https://arxiv.org/abs/2302.13971): open and efficient foundation language models by Meta. |
+"""
+
+ learn_more_markdown = """
+### License
+The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
+"""
+
+ state = gr.State()
+ notice = gr.Markdown(notice_markdown, elem_id="notice_markdown")
+
+ with gr.Row(elem_id="model_selector_row"):
+ model_selector = gr.Dropdown(
+ choices=models,
+ value=models[0] if len(models) > 0 else "",
+ interactive=True,
+ show_label=False,
+ ).style(container=False)
+
+ chatbot = grChatbot(elem_id="chatbot", visible=False).style(height=550)
+ with gr.Row():
+ with gr.Column(scale=20):
+ textbox = gr.Textbox(
+ show_label=False,
+ placeholder="Enter text and press ENTER",
+ visible=False,
+ ).style(container=False)
+ with gr.Column(scale=1, min_width=50):
+ send_btn = gr.Button(value="Send", visible=False)
+
+ with gr.Row(visible=False) as button_row:
+ upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
+ downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
+ flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
+ # stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
+ regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
+ clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
+
+ with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
+ temperature = gr.Slider(
+ minimum=0.0,
+ maximum=1.0,
+ value=0.7,
+ step=0.1,
+ interactive=True,
+ label="Temperature",
+ )
+ max_output_tokens = gr.Slider(
+ minimum=0,
+ maximum=1024,
+ value=512,
+ step=64,
+ interactive=True,
+ label="Max output tokens",
+ )
+
+ gr.Markdown(learn_more_markdown)
+
+ # Register listeners
+ btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
+ upvote_btn.click(
+ upvote_last_response,
+ [state, model_selector],
+ [textbox, upvote_btn, downvote_btn, flag_btn],
+ )
+ downvote_btn.click(
+ downvote_last_response,
+ [state, model_selector],
+ [textbox, upvote_btn, downvote_btn, flag_btn],
+ )
+ flag_btn.click(
+ flag_last_response,
+ [state, model_selector],
+ [textbox, upvote_btn, downvote_btn, flag_btn],
+ )
+ regenerate_btn.click(regenerate, state, [state, chatbot, textbox] + btn_list).then(
+ http_bot,
+ [state, model_selector, temperature, max_output_tokens],
+ [state, chatbot] + btn_list,
+ )
+ clear_btn.click(clear_history, None, [state, chatbot, textbox] + btn_list)
+
+ model_selector.change(clear_history, None, [state, chatbot, textbox] + btn_list)
+
+ textbox.submit(
+ add_text, [state, textbox], [state, chatbot, textbox] + btn_list
+ ).then(
+ http_bot,
+ [state, model_selector, temperature, max_output_tokens],
+ [state, chatbot] + btn_list,
+ )
+ send_btn.click(
+ add_text, [state, textbox], [state, chatbot, textbox] + btn_list
+ ).then(
+ http_bot,
+ [state, model_selector, temperature, max_output_tokens],
+ [state, chatbot] + btn_list,
+ )
+
+ return state, model_selector, chatbot, textbox, send_btn, button_row, parameter_row
+
+
+def build_demo():
+ with gr.Blocks(
+ title="Chat with Open Large Language Models",
+ theme=gr.themes.Base(),
+ css=block_css,
+ ) as demo:
+ url_params = gr.JSON(visible=False)
+
+ (
+ state,
+ model_selector,
+ chatbot,
+ textbox,
+ send_btn,
+ button_row,
+ parameter_row,
+ ) = build_single_model_ui()
+
+ if args.model_list_mode == "once":
+ demo.load(
+ load_demo,
+ [url_params],
+ [
+ state,
+ model_selector,
+ chatbot,
+ textbox,
+ send_btn,
+ button_row,
+ parameter_row,
+ ],
+ _js=get_window_url_params,
+ )
+ else:
+ raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
+
+ return demo
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", type=str, default="0.0.0.0")
+ parser.add_argument("--port", type=int)
+ parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
+ parser.add_argument("--concurrency-count", type=int, default=10)
+ parser.add_argument(
+ "--model-list-mode", type=str, default="once", choices=["once", "reload"]
+ )
+ parser.add_argument("--share", action="store_true")
+ parser.add_argument(
+ "--moderate", action="store_true", help="Enable content moderation"
+ )
+ args = parser.parse_args()
+ logger.info(f"args: {args}")
+
+ models = get_model_list(args.controller_url)
+ set_global_vars(args.controller_url, args.moderate, models)
+
+ logger.info(args)
+ demo = build_demo()
+ demo.queue(
+ concurrency_count=args.concurrency_count, status_update_rate=10, api_open=False
+ ).launch(
+ server_name=args.host, server_port=args.port, share=args.share, max_threads=200
+ )
diff --git a/model/fastchat/serve/gradio_web_server_multi.py b/model/fastchat/serve/gradio_web_server_multi.py
new file mode 100644
index 0000000000000000000000000000000000000000..705f6200c96e0e2cbc29729b472dc8157b7509e7
--- /dev/null
+++ b/model/fastchat/serve/gradio_web_server_multi.py
@@ -0,0 +1,503 @@
+import argparse
+from collections import defaultdict
+import datetime
+import json
+import os
+import time
+import uuid
+
+import gradio as gr
+import numpy as np
+import requests
+
+from fastchat.conversation import get_default_conv_template, SeparatorStyle
+from fastchat.constants import LOGDIR
+from fastchat.utils import (
+ build_logger,
+ server_error_msg,
+ violates_moderation,
+ moderation_msg,
+)
+from fastchat.serve.gradio_patch import Chatbot as grChatbot
+from fastchat.serve.gradio_web_server import (
+ http_bot,
+ set_global_vars,
+ get_window_url_params,
+ get_conv_log_filename,
+ block_css,
+ build_single_model_ui,
+ no_change_btn,
+ enable_btn,
+ disable_btn,
+ get_model_list,
+ load_demo_single,
+)
+from fastchat.serve.inference import compute_skip_echo_len
+
+
+logger = build_logger("gradio_web_server_multi", "gradio_web_server_multi.log")
+
+num_models = 2
+
+
+def load_demo_side_by_side(url_params):
+ states = (None,) * num_models
+
+ model_left = models[0]
+ if len(models) > 1:
+ weights = ([8, 4, 2, 1] + [1] * 32)[:len(models) - 1]
+ weights = weights / np.sum(weights)
+ model_right = np.random.choice(models[1:], p=weights)
+ else:
+ model_right = model_left
+
+ dropdown_updates = (
+ gr.Dropdown.update(model_left, visible=True),
+ gr.Dropdown.update(model_right, visible=True),
+ )
+
+ return (
+ states
+ + dropdown_updates
+ + (gr.Chatbot.update(visible=True),) * num_models
+ + (
+ gr.Textbox.update(visible=True),
+ gr.Box.update(visible=True),
+ gr.Row.update(visible=True),
+ gr.Row.update(visible=True),
+ gr.Accordion.update(visible=True),
+ )
+ )
+
+
+def load_demo(url_params, request: gr.Request):
+ logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
+ selected = 0
+ if "arena" in url_params or "compare" in url_params:
+ selected = 1
+ single_updates = load_demo_single(url_params)
+ side_by_side_updates = load_demo_side_by_side(url_params)
+ return (gr.Tabs.update(selected=selected),) + single_updates + side_by_side_updates
+
+
+def vote_last_response(states, vote_type, model_selectors, request: gr.Request):
+ with open(get_conv_log_filename(), "a") as fout:
+ data = {
+ "tstamp": round(time.time(), 4),
+ "type": vote_type,
+ "models": [x for x in model_selectors],
+ "states": [x.dict() for x in states],
+ "ip": request.client.host,
+ }
+ fout.write(json.dumps(data) + "\n")
+
+
+def leftvote_last_response(
+ state0, state1, model_selector0, model_selector1, request: gr.Request
+):
+ logger.info(f"leftvote. ip: {request.client.host}")
+ vote_last_response(
+ [state0, state1], "leftvote", [model_selector0, model_selector1], request
+ )
+ return ("",) + (disable_btn,) * 3
+
+
+def rightvote_last_response(
+ state0, state1, model_selector0, model_selector1, request: gr.Request
+):
+ logger.info(f"rightvote. ip: {request.client.host}")
+ vote_last_response(
+ [state0, state1], "rightvote", [model_selector0, model_selector1], request
+ )
+ return ("",) + (disable_btn,) * 3
+
+
+def tievote_last_response(
+ state0, state1, model_selector0, model_selector1, request: gr.Request
+):
+ logger.info(f"tievote. ip: {request.client.host}")
+ vote_last_response(
+ [state0, state1], "tievote", [model_selector0, model_selector1], request
+ )
+ return ("",) + (disable_btn,) * 3
+
+
+def regenerate(state0, state1, request: gr.Request):
+ logger.info(f"regenerate. ip: {request.client.host}")
+ states = [state0, state1]
+ for i in range(num_models):
+ states[i].messages[-1][-1] = None
+ states[i].skip_next = False
+ return states + [x.to_gradio_chatbot() for x in states] + [""] + [disable_btn] * 5
+
+
+def clear_history(request: gr.Request):
+ logger.info(f"clear_history. ip: {request.client.host}")
+ return [None] * num_models + [None] * num_models + [""] + [disable_btn] * 5
+
+
+def share_click(state0, state1, model_selector0, model_selector1,
+ request: gr.Request):
+ logger.info(f"share. ip: {request.client.host}")
+ if state0 is not None and state1 is not None:
+ vote_last_response(
+ [state0, state1], "share", [model_selector0, model_selector1], request
+ )
+
+
+def add_text(state0, state1, text, request: gr.Request):
+ logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
+ states = [state0, state1]
+
+ for i in range(num_models):
+ if states[i] is None:
+ states[i] = get_default_conv_template("vicuna").copy()
+
+ if len(text) <= 0:
+ for i in range(num_models):
+ states[i].skip_next = True
+ return (
+ states
+ + [x.to_gradio_chatbot() for x in states]
+ + [""]
+ + [
+ no_change_btn,
+ ]
+ * 5
+ )
+
+ if args.moderate:
+ flagged = violates_moderation(text)
+ if flagged:
+ logger.info(f"violate moderation. ip: {request.client.host}. text: {text}")
+ for i in range(num_models):
+ states[i].skip_next = True
+ return (
+ states
+ + [x.to_gradio_chatbot() for x in states]
+ + [moderation_msg]
+ + [
+ no_change_btn,
+ ]
+ * 5
+ )
+
+ text = text[:1536] # Hard cut-off
+ for i in range(num_models):
+ states[i].append_message(states[i].roles[0], text)
+ states[i].append_message(states[i].roles[1], None)
+ states[i].skip_next = False
+
+ return (
+ states
+ + [x.to_gradio_chatbot() for x in states]
+ + [""]
+ + [
+ disable_btn,
+ ]
+ * 5
+ )
+
+
+def http_bot_all(
+ state0,
+ state1,
+ model_selector0,
+ model_selector1,
+ temperature,
+ max_new_tokens,
+ request: gr.Request,
+):
+ logger.info(f"http_bot_all. ip: {request.client.host}")
+ states = [state0, state1]
+ model_selector = [model_selector0, model_selector1]
+ gen = []
+ for i in range(num_models):
+ gen.append(
+ http_bot(states[i], model_selector[i], temperature, max_new_tokens, request)
+ )
+
+ chatbots = [None] * num_models
+ while True:
+ stop = True
+ for i in range(num_models):
+ try:
+ ret = next(gen[i])
+ states[i], chatbots[i] = ret[0], ret[1]
+ buttons = ret[2:]
+ stop = False
+ except StopIteration:
+ pass
+ yield states + chatbots + list(buttons)
+ if stop:
+ break
+
+ for i in range(10):
+ if i % 2 == 0:
+ yield states + chatbots + [disable_btn] * 3 + list(buttons)[3:]
+ else:
+ yield states + chatbots + list(buttons)
+ time.sleep(0.2)
+
+
+def build_side_by_side_ui():
+ notice_markdown = """
+# ⚔️ Chatbot Arena ⚔️
+- Chat with state-of-the-art open models **side-by-side** and vote for which one is better!
+- [[GitHub]](https://github.com/lm-sys/FastChat) [[Twitter]](https://twitter.com/lmsysorg) [[Discord]](https://discord.gg/h6kCZb72G7)
+
+### Terms of use
+By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data for future research.**
+The demo works better on desktop devices with a wide screen.
+
+### Choose two models to chat with
+| | |
+| ---- | ---- |
+| [Vicuna](https://vicuna.lmsys.org): a chat assistant fine-tuned from LLaMA on user-shared conversations by LMSYS. | [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/): a dialogue model for academic research by BAIR |
+| [OpenAssistant (oasst)](https://open-assistant.io/): a chat-based assistant for everyone by LAION. | [Dolly](https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm): an instruction-tuned open large language model by Databricks. |
+| [ChatGLM](https://chatglm.cn/blog): an open bilingual dialogue language model by Tsinghua University | [StableLM](https://github.com/stability-AI/stableLM/): Stability AI language models. |
+| [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html): a model fine-tuned from LLaMA on instruction-following demonstrations by Stanford. | [LLaMA](https://arxiv.org/abs/2302.13971): open and efficient foundation language models by Meta. |
+"""
+
+ learn_more_markdown = """
+### License
+The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
+"""
+
+ states = [gr.State() for _ in range(num_models)]
+ model_selectors = [None] * num_models
+ chatbots = [None] * num_models
+
+ notice = gr.Markdown(notice_markdown, elem_id="notice_markdown")
+
+ with gr.Box(elem_id="share-region"):
+ with gr.Row():
+ for i in range(num_models):
+ with gr.Column():
+ model_selectors[i] = gr.Dropdown(
+ choices=models,
+ value=models[i] if len(models) > i else "",
+ interactive=True,
+ show_label=False,
+ ).style(container=False)
+
+ with gr.Row():
+ for i in range(num_models):
+ label = "Left" if i == 0 else "Right"
+ with gr.Column():
+ chatbots[i] = grChatbot(label=label, elem_id=f"chatbot{i}",
+ visible=False).style(height=550)
+
+ with gr.Box() as button_row:
+ with gr.Row():
+ leftvote_btn = gr.Button(value="👈 Left is better", interactive=False)
+ tie_btn = gr.Button(value="🤝 Tie", interactive=False)
+ rightvote_btn = gr.Button(value="👉 Right is better", interactive=False)
+
+ with gr.Row():
+ with gr.Column(scale=20):
+ textbox = gr.Textbox(
+ show_label=False,
+ placeholder="Enter text and press ENTER",
+ visible=False,
+ ).style(container=False)
+ with gr.Column(scale=1, min_width=50):
+ send_btn = gr.Button(value="Send", visible=False)
+
+ with gr.Row() as button_row2:
+ regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
+ clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
+ share_btn = gr.Button(value="📷 Share")
+
+ with gr.Accordion("Parameters", open=False, visible=True) as parameter_row:
+ temperature = gr.Slider(
+ minimum=0.0,
+ maximum=1.0,
+ value=0.7,
+ step=0.1,
+ interactive=True,
+ label="Temperature",
+ )
+ max_output_tokens = gr.Slider(
+ minimum=0,
+ maximum=1024,
+ value=512,
+ step=64,
+ interactive=True,
+ label="Max output tokens",
+ )
+
+ gr.Markdown(learn_more_markdown)
+
+ # Register listeners
+ btn_list = [leftvote_btn, rightvote_btn, tie_btn, regenerate_btn, clear_btn]
+ leftvote_btn.click(
+ leftvote_last_response,
+ states + model_selectors,
+ [textbox, leftvote_btn, rightvote_btn, tie_btn],
+ )
+ rightvote_btn.click(
+ rightvote_last_response,
+ states + model_selectors,
+ [textbox, leftvote_btn, rightvote_btn, tie_btn],
+ )
+ tie_btn.click(
+ tievote_last_response,
+ states + model_selectors,
+ [textbox, leftvote_btn, rightvote_btn, tie_btn],
+ )
+ regenerate_btn.click(
+ regenerate, states, states + chatbots + [textbox] + btn_list
+ ).then(
+ http_bot_all,
+ states + model_selectors + [temperature, max_output_tokens],
+ states + chatbots + btn_list,
+ )
+ clear_btn.click(clear_history, None, states + chatbots + [textbox] + btn_list)
+
+ share_js="""
+function (a, b, c, d) {
+ const captureElement = document.querySelector('#share-region');
+ html2canvas(captureElement)
+ .then(canvas => {
+ canvas.style.display = 'none'
+ document.body.appendChild(canvas)
+ return canvas
+ })
+ .then(canvas => {
+ const image = canvas.toDataURL('image/png')
+ const a = document.createElement('a')
+ a.setAttribute('download', 'chatbot-arena.png')
+ a.setAttribute('href', image)
+ a.click()
+ canvas.remove()
+ });
+ return [a, b, c, d];
+}
+"""
+ share_btn.click(share_click, states + model_selectors, [], _js=share_js)
+
+ for i in range(num_models):
+ model_selectors[i].change(
+ clear_history, None, states + chatbots + [textbox] + btn_list
+ )
+
+ textbox.submit(
+ add_text, states + [textbox], states + chatbots + [textbox] + btn_list
+ ).then(
+ http_bot_all,
+ states + model_selectors + [temperature, max_output_tokens],
+ states + chatbots + btn_list,
+ )
+ send_btn.click(
+ add_text, states + [textbox], states + chatbots + [textbox] + btn_list
+ ).then(
+ http_bot_all,
+ states + model_selectors + [temperature, max_output_tokens],
+ states + chatbots + btn_list,
+ )
+
+ return (
+ states,
+ model_selectors,
+ chatbots,
+ textbox,
+ send_btn,
+ button_row,
+ button_row2,
+ parameter_row,
+ )
+
+
+def build_demo():
+ with gr.Blocks(
+ title="Chat with Open Large Language Models",
+ theme=gr.themes.Base(),
+ css=block_css,
+ ) as demo:
+ with gr.Tabs() as tabs:
+ with gr.Tab("Single Model", id=0):
+ (
+ a_state,
+ a_model_selector,
+ a_chatbot,
+ a_textbox,
+ a_send_btn,
+ a_button_row,
+ a_parameter_row,
+ ) = build_single_model_ui()
+ a_list = [
+ a_state,
+ a_model_selector,
+ a_chatbot,
+ a_textbox,
+ a_send_btn,
+ a_button_row,
+ a_parameter_row,
+ ]
+
+ with gr.Tab("Chatbot Arena", id=1):
+ (
+ b_states,
+ b_model_selectors,
+ b_chatbots,
+ b_textbox,
+ b_send_btn,
+ b_button_row,
+ b_button_row2,
+ b_parameter_row,
+ ) = build_side_by_side_ui()
+ b_list = (
+ b_states
+ + b_model_selectors
+ + b_chatbots
+ + [
+ b_textbox,
+ b_send_btn,
+ b_button_row,
+ b_button_row2,
+ b_parameter_row,
+ ]
+ )
+
+ url_params = gr.JSON(visible=False)
+
+ if args.model_list_mode == "once":
+ demo.load(
+ load_demo,
+ [url_params],
+ [tabs] + a_list + b_list,
+ _js=get_window_url_params,
+ )
+ else:
+ raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
+
+ return demo
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", type=str, default="0.0.0.0")
+ parser.add_argument("--port", type=int)
+ parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
+ parser.add_argument("--concurrency-count", type=int, default=10)
+ parser.add_argument(
+ "--model-list-mode", type=str, default="once", choices=["once", "reload"]
+ )
+ parser.add_argument("--share", action="store_true")
+ parser.add_argument(
+ "--moderate", action="store_true", help="Enable content moderation"
+ )
+ args = parser.parse_args()
+ logger.info(f"args: {args}")
+
+ models = get_model_list(args.controller_url)
+ set_global_vars(args.controller_url, args.moderate, models)
+
+ logger.info(args)
+ demo = build_demo()
+ demo.queue(
+ concurrency_count=args.concurrency_count, status_update_rate=10, api_open=False
+ ).launch(
+ server_name=args.host, server_port=args.port, share=args.share, max_threads=200
+ )
diff --git a/model/fastchat/serve/huggingface_api.py b/model/fastchat/serve/huggingface_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dd4ea466d72345670163c94874ae0a377e874b6
--- /dev/null
+++ b/model/fastchat/serve/huggingface_api.py
@@ -0,0 +1,77 @@
+"""
+Usage:
+python3 -m fastchat.serve.huggingface_api --model ~/model_weights/vicuna-7b/
+"""
+import argparse
+import json
+
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+from fastchat.conversation import get_default_conv_template, compute_skip_echo_len
+from fastchat.serve.inference import load_model
+
+
+@torch.inference_mode()
+def main(args):
+ model, tokenizer = load_model(
+ args.model_path,
+ args.device,
+ args.num_gpus,
+ args.max_gpu_memory,
+ args.load_8bit,
+ debug=args.debug,
+ )
+
+ msg = args.message
+
+ conv = get_default_conv_template(args.model_path).copy()
+ conv.append_message(conv.roles[0], msg)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+
+ inputs = tokenizer([prompt])
+ output_ids = model.generate(
+ torch.as_tensor(inputs.input_ids).cuda(),
+ do_sample=True,
+ temperature=0.7,
+ max_new_tokens=1024,
+ )
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
+ skip_echo_len = compute_skip_echo_len(args.model_path, conv, prompt)
+ outputs = outputs[skip_echo_len:]
+
+ print(f"{conv.roles[0]}: {msg}")
+ print(f"{conv.roles[1]}: {outputs}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model-path",
+ type=str,
+ default="facebook/opt-350m",
+ help="The path to the weights",
+ )
+ parser.add_argument(
+ "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda"
+ )
+ parser.add_argument("--num-gpus", type=str, default="1")
+ parser.add_argument(
+ "--max-gpu-memory",
+ type=str,
+ help="The maximum memory per gpu. Use a string like '13Gib'",
+ )
+ parser.add_argument(
+ "--load-8bit", action="store_true", help="Use 8-bit quantization."
+ )
+ parser.add_argument(
+ "--conv-template", type=str, default=None, help="Conversation prompt template."
+ )
+ parser.add_argument("--temperature", type=float, default=0.7)
+ parser.add_argument("--max-new-tokens", type=int, default=512)
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--message", type=str, default="Hello! Who are you?")
+ args = parser.parse_args()
+
+ main(args)
diff --git a/model/fastchat/serve/inference.py b/model/fastchat/serve/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..a07e9e8981490eb6fa1107190de60723dcce8ae1
--- /dev/null
+++ b/model/fastchat/serve/inference.py
@@ -0,0 +1,556 @@
+"""Inference for FastChat models."""
+import abc
+from typing import Optional
+import warnings
+import os,json,csv
+import torch
+
+try:
+ from transformers import (
+ AutoTokenizer,
+ AutoModelForCausalLM,
+ LlamaTokenizer,
+ LlamaForCausalLM,
+ AutoModel,
+ AutoModelForSeq2SeqLM,
+ )
+except ImportError:
+ from transformers import (
+ AutoTokenizer,
+ AutoModelForCausalLM,
+ LLaMATokenizer,
+ LLamaForCausalLM,
+ AutoModel,
+ AutoModelForSeq2SeqLM,
+ )
+
+from model.fastchat.conversation import (
+ conv_templates,
+ get_default_conv_template,
+ compute_skip_echo_len,
+ SeparatorStyle,
+)
+from model.fastchat.serve.compression import compress_module
+from model.fastchat.serve.monkey_patch_non_inplace import (
+ replace_llama_attn_with_non_inplace_operations,
+)
+from model.fastchat.serve.serve_chatglm import chatglm_generate_stream
+
+
+def raise_warning_for_old_weights(model_path, model):
+ if "vicuna" in model_path.lower():
+ try:
+ is_vicuna = isinstance(model, LlamaForCausalLM)
+ except Exception:
+ is_vicuna = isinstance(model, LLamaForCausalLM)
+ if is_vicuna and model.model.vocab_size > 32000:
+ warnings.warn(
+ "\nYou are probably using the old Vicuna-v0 model, "
+ "which will generate unexpected results with the "
+ "current fschat.\nYou can try one of the following methods:\n"
+ "1. Upgrade your weights to the new Vicuna-v1.1: https://github.com/lm-sys/FastChat#vicuna-weights.\n"
+ "2. Use the old conversation template by `python3 -m fastchat.serve.cli --model-path /path/to/vicuna-v0 --conv-template conv_one_shot`\n"
+ "3. Downgrade fschat to fschat==0.1.10 (Not recommonded).\n"
+ )
+
+
+def get_gpu_memory(max_gpus=None):
+ gpu_memory = []
+ num_gpus = (
+ torch.cuda.device_count()
+ if max_gpus is None
+ else min(max_gpus, torch.cuda.device_count())
+ )
+
+ for gpu_id in range(num_gpus):
+ with torch.cuda.device(gpu_id):
+ device = torch.cuda.current_device()
+ gpu_properties = torch.cuda.get_device_properties(device)
+ total_memory = gpu_properties.total_memory / (1024**3)
+ allocated_memory = torch.cuda.memory_allocated() / (1024**3)
+ available_memory = total_memory - allocated_memory
+ gpu_memory.append(available_memory)
+ return gpu_memory
+
+
+def load_model(
+ model_path, device, num_gpus, max_gpu_memory=None, load_8bit=False, debug=False
+):
+ if device == "cpu":
+ kwargs = {}
+ elif device == "cuda":
+ kwargs = {"torch_dtype": torch.float16}
+ if num_gpus == "auto":
+ kwargs["device_map"] = "auto"
+ else:
+ num_gpus = int(num_gpus)
+ if num_gpus != 1:
+ kwargs["device_map"] = "auto"
+ if max_gpu_memory is None:
+ kwargs[
+ "device_map"
+ ] = "sequential" # This is important for not the same VRAM sizes
+ available_gpu_memory = get_gpu_memory(num_gpus)
+ kwargs["max_memory"] = {
+ i: str(int(available_gpu_memory[i] * 0.85)) + "GiB"
+ for i in range(num_gpus)
+ }
+ else:
+ kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)}
+ print("init_kwargs", kwargs)
+ elif device == "mps":
+ kwargs = {"torch_dtype": torch.float16}
+ # Avoid bugs in mps backend by not using in-place operations.
+ replace_llama_attn_with_non_inplace_operations()
+ else:
+ raise ValueError(f"Invalid device: {device}")
+
+ if "chatglm" in model_path:
+ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+ model = AutoModel.from_pretrained(
+ model_path, trust_remote_code=True, **kwargs
+ ).cuda()
+ elif "google/flan-t5" in model_path:
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+ model = AutoModelForSeq2SeqLM.from_pretrained(
+ model_path, low_cpu_mem_usage=True, **kwargs
+ )
+ elif "dolly" in model_path:
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
+ model = AutoModelForCausalLM.from_pretrained(
+ model_path, low_cpu_mem_usage=True, **kwargs
+ )
+ # 50277 means "### End"
+ tokenizer.eos_token_id = 50277
+ elif "pythia" in model_path or "stablelm" in model_path:
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
+ model = AutoModelForCausalLM.from_pretrained(
+ model_path, low_cpu_mem_usage=True, **kwargs
+ )
+ else:
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+ model = AutoModelForCausalLM.from_pretrained(
+ model_path, low_cpu_mem_usage=True, **kwargs
+ )
+ raise_warning_for_old_weights(model_path, model)
+
+ if load_8bit:
+ compress_module(model, device)
+
+ if (device == "cuda" and num_gpus == 1) or device == "mps":
+ model.to(device)
+
+ if debug:
+ print(model)
+
+ return model, tokenizer
+
+
+@torch.inference_mode()
+def generate_stream(
+ model, tokenizer, params, device, context_len=2048, stream_interval=2
+):
+ prompt = params["prompt"]
+ l_prompt = len(prompt)
+ temperature = float(params.get("temperature", 1.0))
+ max_new_tokens = int(params.get("max_new_tokens", 32))
+ stop_str = params.get("stop", None)
+ stop_token_ids = params.get("stop_ids", [tokenizer.eos_token_id])
+
+ input_ids = tokenizer(prompt).input_ids
+ output_ids = list(input_ids)
+ print("token len:", len(input_ids)) ## TODO
+ max_src_len = context_len - max_new_tokens - 8
+ input_ids = input_ids[-max_src_len:]
+
+ for i in range(max_new_tokens):
+ if i == 0:
+ if model.config.is_encoder_decoder:
+ encoder_outputs = model.encoder(
+ input_ids=torch.as_tensor([input_ids], device=device)
+ )
+ out = model(
+ torch.as_tensor([input_ids], device=device),
+ decoder_input_ids=torch.as_tensor(
+ [[model.generation_config.decoder_start_token_id]],
+ device=device,
+ ),
+ encoder_outputs=encoder_outputs,
+ use_cache=True,
+ )
+ logits = out.logits
+ past_key_values = out.past_key_values
+ else:
+ out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
+ logits = out.logits
+ past_key_values = out.past_key_values
+ else:
+ if model.config.is_encoder_decoder:
+ out = model(
+ input_ids=torch.as_tensor([input_ids], device=device),
+ use_cache=True,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=torch.as_tensor([[token]], device=device),
+ past_key_values=past_key_values,
+ )
+ logits = out.logits
+ past_key_values = out.past_key_values
+ else:
+ out = model(
+ input_ids=torch.as_tensor([[token]], device=device),
+ use_cache=True,
+ past_key_values=past_key_values,
+ )
+ logits = out.logits
+ past_key_values = out.past_key_values
+
+ last_token_logits = logits[0][-1]
+
+ if device == "mps":
+ # Switch to CPU by avoiding some bugs in mps backend.
+ last_token_logits = last_token_logits.float().to("cpu")
+
+ if temperature < 1e-4:
+ token = int(torch.argmax(last_token_logits))
+ else:
+ probs = torch.softmax(last_token_logits / temperature, dim=-1)
+ token = int(torch.multinomial(probs, num_samples=1))
+
+ output_ids.append(token)
+
+ if token in stop_token_ids:
+ stopped = True
+ else:
+ stopped = False
+
+ if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
+ output = tokenizer.decode(output_ids, skip_special_tokens=True)
+ if stop_str:
+ pos = output.rfind(stop_str, l_prompt)
+ if pos != -1:
+ output = output[:pos]
+ stopped = True
+ yield output
+
+ if stopped:
+ break
+
+ del past_key_values
+
+
+class ChatIO(abc.ABC):
+ @abc.abstractmethod
+ def prompt_for_input(self, role: str) -> str:
+ """Prompt for input from a role."""
+
+ @abc.abstractmethod
+ def prompt_for_output(self, role: str):
+ """Prompt for output from a role."""
+
+ @abc.abstractmethod
+ def stream_output(self, output_stream, skip_echo_len: int):
+ """Stream output."""
+
+
+def chat_loop(
+ model_path: str,
+ device: str,
+ num_gpus: str,
+ max_gpu_memory: str,
+ load_8bit: bool,
+ conv_template,
+ temperature: float,
+ max_new_tokens: int,
+ chatio: ChatIO,
+ debug: bool,
+):
+ # Model
+ model, tokenizer = load_model(
+ model_path, device, num_gpus, max_gpu_memory, load_8bit, debug
+ )
+ is_chatglm = "chatglm" in str(type(model)).lower()
+
+ # Chat
+ if conv_template:
+ conv = conv_template.copy()
+ else:
+ conv = get_default_conv_template(model_path).copy()
+
+ while True:
+ try:
+ inp = chatio.prompt_for_input(conv.roles[0])
+ except EOFError:
+ inp = ""
+ if not inp:
+ print("exit...")
+ break
+
+ conv.append_message(conv.roles[0], inp)
+ conv.append_message(conv.roles[1], None)
+
+ if is_chatglm:
+ prompt = conv.messages[conv.offset :]
+ generate_stream_func = chatglm_generate_stream
+ else:
+ generate_stream_func = generate_stream
+ prompt = conv.get_prompt()
+
+
+ skip_echo_len = compute_skip_echo_len(model_path, conv, prompt)
+ stop_str = (
+ conv.sep
+ if conv.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.BAIZE]
+ else None
+ )
+
+ params = {
+ "model": model_path,
+ "prompt": prompt,
+ "temperature": temperature,
+ "max_new_tokens": max_new_tokens,
+ "stop": stop_str,
+ }
+
+ chatio.prompt_for_output(conv.roles[1])
+ output_stream = generate_stream_func(model, tokenizer, params, device)
+ outputs = chatio.stream_output(output_stream, skip_echo_len)
+ # NOTE: strip is important to align with the training data.
+ conv.messages[-1][-1] = outputs.strip()
+ if debug:
+ print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
+
+def question_loop(
+ model_path: str,
+ device: str,
+ num_gpus: str,
+ max_gpu_memory: str,
+ load_8bit: bool,
+ conv_template: Optional[str],
+ temperature: float,
+ max_new_tokens: int,
+ chatio: ChatIO,
+ debug: bool,
+ prompt_caption: dict = None,
+ prompt_caption_path: str = None,
+ output_path: str = None,
+):
+ # Model
+ model, tokenizer = load_model(
+ model_path, device, num_gpus, max_gpu_memory, load_8bit, debug
+ )
+ is_chatglm = "chatglm" in str(type(model)).lower()
+
+ # Chat
+ if conv_template:
+ conv = conv_templates[conv_template].copy()
+ else:
+ conv = get_default_conv_template(model_path).copy()
+
+ # Question
+ if prompt_caption:
+ questions = prompt_caption
+ elif not prompt_caption and prompt_caption_path:
+ with open(prompt_caption_path, 'r') as f:
+ questions = json.load(f)
+ else:
+ raise ValueError("prompt_caption or prompt_caption_path must be provided")
+
+
+
+ captions = {}
+ for id,question in questions.items():
+
+ conv.append_message(conv.roles[0], question)
+ conv.append_message(conv.roles[1], None)
+
+ if is_chatglm:
+ prompt = conv.messages[conv.offset :]
+ generate_stream_func = chatglm_generate_stream
+ else:
+ generate_stream_func = generate_stream
+ prompt = conv.get_prompt()
+
+ skip_echo_len = compute_skip_echo_len(model_path, conv, prompt)
+ stop_str = (
+ conv.sep
+ if conv.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.BAIZE]
+ else None
+ )
+
+ params = {
+ "model": model_path,
+ "prompt": prompt,
+ "temperature": temperature,
+ "max_new_tokens": max_new_tokens,
+ "stop": stop_str,
+ }
+
+ chatio.prompt_for_output(conv.roles[1])
+ output_stream = generate_stream_func(model, tokenizer, params, device)
+ outputs = chatio.stream_output(output_stream, skip_echo_len)
+ captions[id] = outputs
+ # clear conv for next question
+ del conv
+ conv = get_default_conv_template(model_path).copy()
+ if debug:
+ print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
+ with open(output_path, 'w') as f:
+ json.dump(captions, f)
+ print(captions)
+ return captions
+
+def get_test(file_path):
+ data_info = dict()
+ # if data_info exists, load it
+ if os.path.exists('data_info.json'):
+ print("data info exists, loading...")
+ with open('data_info.json', 'r') as fp:
+ data_info = json.load(fp)
+ return data_info
+ with open(file_path, 'r') as csvfile:
+ reader = csv.reader(csvfile, delimiter=',')
+ # skip the first row
+ next(reader)
+ for row in reader:
+ # num,key,question,answer,vid_id,gif_name,description
+ if row[3] == '' or row[3] not in ['yes', 'no']:
+ continue
+ video = row[4]
+ try:
+ data_info[video]['questions'][row[1]] = row[2]
+ data_info[video]['answers'][row[1]] = row[3]
+ except:
+ data_info[video] = dict()
+ data_info[video]['questions'] = dict()
+ data_info[video]['answers'] = dict()
+ data_info[video]['infer'] = dict() ### empty dict for inference results
+ data_info[video]['questions'][row[1]] = row[2]
+ data_info[video]['answers'][row[1]] = row[3]
+ with open('data_info.json', 'w') as fp:
+ json.dump(data_info, fp)
+ return data_info
+
+def answer_loop(
+ model_path: str,
+ device: str,
+ num_gpus: str,
+ max_gpu_memory: str,
+ load_8bit: bool,
+ conv_template: Optional[str],
+ temperature: float,
+ max_new_tokens: int,
+ chatio: ChatIO,
+ debug: bool,
+ prompt_caption: dict = None,
+ prompt_caption_path: str = None,
+ output_path: str = None,
+):
+ # Model
+ model, tokenizer = load_model(
+ model_path, device, num_gpus, max_gpu_memory, load_8bit, debug
+ )
+ is_chatglm = "chatglm" in str(type(model)).lower()
+
+ # Chat
+ if conv_template:
+ conv = conv_templates[conv_template].copy()
+ else:
+ conv = get_default_conv_template(model_path).copy()
+
+ # Question
+ if os.path.exists(answer_path):
+ with open(answer_path, 'r') as f:
+ import json
+ print("answer file"+ str(answer_path) + "exists, loading...")
+ data = json.load(f)
+ else:
+ print("loading origin data info...")
+ data = get_test(data_info_path)
+
+ if question_path and caption_path:
+ import json
+ with open(question_path, 'r') as f:
+ questions = json.load(f)
+
+
+
+ for id,prompted_cap in questions.items():
+ # single loop for one video
+ captions = {}
+ qid_list = []
+ question_list = []
+ global_counter = 0
+ counter = 0
+ question_batch_size = 10
+ for qid, question in data[id]['questions'].items():
+ global_counter += 1
+ counter += 1
+ qid_list.append(qid)
+ question_list.append(question)
+ prompted_questions = ''
+ # if it's the last step of the loop, set the batch size to the counter
+ if global_counter == len(data[id]['questions']):
+ question_batch_size = counter
+
+ if counter == question_batch_size:
+ for i in range(len(qid_list)):
+ prompted_questions += 'Question ' + str(i) + '. ' + question_list[i] + '\n'
+ print(prompted_cap+prompted_questions)
+ conv.append_message(conv.roles[0], prompted_cap+prompted_questions)
+ conv.append_message(conv.roles[1], None)
+
+ if is_chatglm:
+ prompt = conv.messages[conv.offset :]
+ generate_stream_func = chatglm_generate_stream
+ else:
+ generate_stream_func = generate_stream
+ prompt = conv.get_prompt()
+
+ skip_echo_len = compute_skip_echo_len(model_path, conv, prompt)
+ stop_str = (
+ conv.sep
+ if conv.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.BAIZE]
+ else None
+ )
+
+ params = {
+ "model": model_path,
+ "prompt": prompt,
+ "temperature": temperature,
+ "max_new_tokens": max_new_tokens,
+ "stop": stop_str,
+ }
+
+ chatio.prompt_for_output(conv.roles[1])
+ output_stream = generate_stream_func(model, tokenizer, params, device)
+ outputs = chatio.stream_output(output_stream, skip_echo_len)
+ if question_batch_size == 1:
+ data[id]['infer'][qid_list[0]] = outputs
+ else:
+ output = outputs.split('\n')
+ print(output)
+ for i in range(len(qid_list)):
+ try:
+ data[id]['infer'][qid_list[i]] = output[i][3:] # remove the index
+ print(output[i][3:])
+ except Exception as e:
+ # save to file of current video name and exception question id
+ print("error")
+ with open("error_info.txt", 'a') as f:
+ f.write(id + ':'+'\n')
+ f.write(str(e))
+ f.write('\n')
+ raise Exception("error")
+ captions[id] = outputs
+ # clear conv for next question
+ del conv
+ counter = 0
+ qid_list = []
+ question_list = []
+ conv = get_default_conv_template(model_path).copy()
+ if debug:
+ print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
+ with open(caption_path, 'w') as f:
+ json.dump(captions, f)
+ with open(answer_path, 'w') as f:
+ json.dump(data, f)
\ No newline at end of file
diff --git a/model/fastchat/serve/model_worker.py b/model/fastchat/serve/model_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..65aa2b726fd8de9b57bebdcd73ec4ee350f88af2
--- /dev/null
+++ b/model/fastchat/serve/model_worker.py
@@ -0,0 +1,268 @@
+"""
+A model worker executes the model.
+"""
+import argparse
+import asyncio
+import dataclasses
+import logging
+import json
+import os
+import time
+from typing import List, Union
+import threading
+import uuid
+
+from fastapi import FastAPI, Request, BackgroundTasks
+from fastapi.responses import StreamingResponse
+import requests
+
+try:
+ from transformers import (
+ AutoTokenizer,
+ AutoModelForCausalLM,
+ LlamaTokenizer,
+ AutoModel,
+ )
+except ImportError:
+ from transformers import (
+ AutoTokenizer,
+ AutoModelForCausalLM,
+ LLaMATokenizer,
+ AutoModel,
+ )
+import torch
+import uvicorn
+
+from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
+from fastchat.serve.inference import load_model, generate_stream
+from fastchat.serve.serve_chatglm import chatglm_generate_stream
+from fastchat.utils import build_logger, server_error_msg, pretty_print_semaphore
+
+GB = 1 << 30
+
+worker_id = str(uuid.uuid4())[:6]
+logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
+global_counter = 0
+
+model_semaphore = None
+
+
+def heart_beat_worker(controller):
+ while True:
+ time.sleep(WORKER_HEART_BEAT_INTERVAL)
+ controller.send_heart_beat()
+
+
+class ModelWorker:
+ def __init__(
+ self,
+ controller_addr,
+ worker_addr,
+ worker_id,
+ no_register,
+ model_path,
+ model_name,
+ device,
+ num_gpus,
+ max_gpu_memory,
+ load_8bit=False,
+ ):
+ self.controller_addr = controller_addr
+ self.worker_addr = worker_addr
+ self.worker_id = worker_id
+ if model_path.endswith("/"):
+ model_path = model_path[:-1]
+ self.model_name = model_name or model_path.split("/")[-1]
+ self.device = device
+
+ logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...")
+ self.model, self.tokenizer = load_model(
+ model_path, device, num_gpus, max_gpu_memory, load_8bit
+ )
+
+ if hasattr(self.model.config, "max_sequence_length"):
+ self.context_len = self.model.config.max_sequence_length
+ elif hasattr(self.model.config, "max_position_embeddings"):
+ self.context_len = self.model.config.max_position_embeddings
+ else:
+ self.context_len = 2048
+
+ is_chatglm = "chatglm" in str(type(self.model)).lower()
+ if is_chatglm:
+ self.generate_stream_func = chatglm_generate_stream
+ else:
+ self.generate_stream_func = generate_stream
+
+ if not no_register:
+ self.register_to_controller()
+ self.heart_beat_thread = threading.Thread(
+ target=heart_beat_worker, args=(self,)
+ )
+ self.heart_beat_thread.start()
+
+ def register_to_controller(self):
+ logger.info("Register to controller")
+
+ url = self.controller_addr + "/register_worker"
+ data = {
+ "worker_name": self.worker_addr,
+ "check_heart_beat": True,
+ "worker_status": self.get_status(),
+ }
+ r = requests.post(url, json=data)
+ assert r.status_code == 200
+
+ def send_heart_beat(self):
+ logger.info(
+ f"Send heart beat. Models: {[self.model_name]}. "
+ f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
+ f"global_counter: {global_counter}"
+ )
+
+ url = self.controller_addr + "/receive_heart_beat"
+
+ while True:
+ try:
+ ret = requests.post(
+ url,
+ json={
+ "worker_name": self.worker_addr,
+ "queue_length": self.get_queue_length(),
+ },
+ timeout=5,
+ )
+ exist = ret.json()["exist"]
+ break
+ except requests.exceptions.RequestException as e:
+ logger.error(f"heart beat error: {e}")
+ time.sleep(5)
+
+ if not exist:
+ self.register_to_controller()
+
+ def get_queue_length(self):
+ if (
+ model_semaphore is None
+ or model_semaphore._value is None
+ or model_semaphore._waiters is None
+ ):
+ return 0
+ else:
+ return (
+ args.limit_model_concurrency
+ - model_semaphore._value
+ + len(model_semaphore._waiters)
+ )
+
+ def get_status(self):
+ return {
+ "model_names": [self.model_name],
+ "speed": 1,
+ "queue_length": self.get_queue_length(),
+ }
+
+ def generate_stream_gate(self, params):
+ try:
+ for output in self.generate_stream_func(
+ self.model,
+ self.tokenizer,
+ params,
+ self.device,
+ self.context_len,
+ args.stream_interval,
+ ):
+ ret = {
+ "text": output,
+ "error_code": 0,
+ }
+ yield json.dumps(ret).encode() + b"\0"
+ except torch.cuda.OutOfMemoryError:
+ ret = {
+ "text": server_error_msg,
+ "error_code": 1,
+ }
+ yield json.dumps(ret).encode() + b"\0"
+
+
+app = FastAPI()
+
+
+def release_model_semaphore():
+ model_semaphore.release()
+
+
+@app.post("/worker_generate_stream")
+async def api_generate_stream(request: Request):
+ global model_semaphore, global_counter
+ global_counter += 1
+ params = await request.json()
+
+ if model_semaphore is None:
+ model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
+ await model_semaphore.acquire()
+ generator = worker.generate_stream_gate(params)
+ background_tasks = BackgroundTasks()
+ background_tasks.add_task(release_model_semaphore)
+ return StreamingResponse(generator, background=background_tasks)
+
+
+@app.post("/worker_get_status")
+async def api_get_status(request: Request):
+ return worker.get_status()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", type=str, default="localhost")
+ parser.add_argument("--port", type=int, default=21002)
+ parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
+ parser.add_argument(
+ "--controller-address", type=str, default="http://localhost:21001"
+ )
+ parser.add_argument(
+ "--model-path",
+ type=str,
+ default="facebook/opt-350m",
+ help="The path to the weights",
+ )
+ parser.add_argument("--model-name", type=str, help="Optional name")
+ parser.add_argument(
+ "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda"
+ )
+ parser.add_argument("--num-gpus", type=int, default=1)
+ parser.add_argument(
+ "--gpus",
+ type=str,
+ default=None,
+ help="A single GPU like 1 or multiple GPUs like 0,2"
+ )
+ parser.add_argument(
+ "--max-gpu-memory",
+ type=str,
+ help="The maximum memory per gpu. Use a string like '13Gib'",
+ )
+ parser.add_argument("--load-8bit", action="store_true")
+ parser.add_argument("--limit-model-concurrency", type=int, default=5)
+ parser.add_argument("--stream-interval", type=int, default=2)
+ parser.add_argument("--no-register", action="store_true")
+ args = parser.parse_args()
+ logger.info(f"args: {args}")
+
+ if args.gpus:
+ if args.num_gpus and len(args.gpus.split(",")) < int(args.num_gpus):
+ raise ValueError(f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!")
+ os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
+
+ worker = ModelWorker(
+ args.controller_address,
+ args.worker_address,
+ worker_id,
+ args.no_register,
+ args.model_path,
+ args.model_name,
+ args.device,
+ args.num_gpus,
+ args.max_gpu_memory,
+ args.load_8bit,
+ )
+ uvicorn.run(app, host=args.host, port=args.port, log_level="info")
diff --git a/model/fastchat/serve/monkey_patch_non_inplace.py b/model/fastchat/serve/monkey_patch_non_inplace.py
new file mode 100644
index 0000000000000000000000000000000000000000..9661d70751261a11bbc33b57967efcf09d3cbe0c
--- /dev/null
+++ b/model/fastchat/serve/monkey_patch_non_inplace.py
@@ -0,0 +1,118 @@
+"""
+Monkey patch the llama implementation in the huggingface/transformers library.
+Avoid bugs in mps backend by not using in-place operations.
+"""
+import math
+from typing import List, Optional, Tuple
+
+import torch
+from torch import nn
+import transformers
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2].clone()
+ x2 = x[..., x.shape[-1] // 2 :].clone()
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
+ gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1]
+ gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3])
+ cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
+ sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = (
+ self.q_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+ key_states = (
+ self.k_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+ value_states = (
+ self.v_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(
+ query_states, key_states, cos, sin, position_ids
+ )
+ # [bsz, nh, t, hd]
+
+ if past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+ past_key_value = (key_states, value_states) if use_cache else None
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(
+ self.head_dim
+ )
+
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights + attention_mask
+ attn_weights = torch.max(
+ attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
+ )
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(
+ query_states.dtype
+ )
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+def replace_llama_attn_with_non_inplace_operations():
+ """Avoid bugs in mps backend by not using in-place operations."""
+ transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
diff --git a/model/fastchat/serve/register_worker.py b/model/fastchat/serve/register_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c2c40295e0351f25709ba25554c9329f15bf0d2
--- /dev/null
+++ b/model/fastchat/serve/register_worker.py
@@ -0,0 +1,26 @@
+"""
+Manually register workers.
+
+Usage:
+python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name http://localhost:21002
+"""
+
+import argparse
+
+import requests
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--controller-address", type=str)
+ parser.add_argument("--worker-name", type=str)
+ parser.add_argument("--check-heart-beat", action="store_true")
+ args = parser.parse_args()
+
+ url = args.controller_address + "/register_worker"
+ data = {
+ "worker_name": args.worker_name,
+ "check_heart_beat": args.check_heart_beat,
+ "worker_status": None,
+ }
+ r = requests.post(url, json=data)
+ assert r.status_code == 200
diff --git a/model/fastchat/serve/serve_chatglm.py b/model/fastchat/serve/serve_chatglm.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b7745625d90ddcc7976bff767633f05ed45dde4
--- /dev/null
+++ b/model/fastchat/serve/serve_chatglm.py
@@ -0,0 +1,30 @@
+import torch
+from typing import List, Tuple
+
+
+@torch.inference_mode()
+def chatglm_generate_stream(
+ model, tokenizer, params, device, context_len=2048, stream_interval=2
+):
+ """Generate text using model's chat api"""
+ messages = params["prompt"]
+ max_new_tokens = int(params.get("max_new_tokens", 256))
+ temperature = float(params.get("temperature", 1.0))
+ top_p = float(params.get("top_p", 0.7))
+
+ gen_kwargs = {
+ "max_new_tokens": max_new_tokens,
+ "do_sample": True,
+ "top_p": top_p,
+ "temperature": temperature,
+ "logits_processor": None,
+ }
+
+ hist = []
+ for i in range(0, len(messages) - 2, 2):
+ hist.append((messages[i][1], messages[i + 1][1]))
+ query = messages[-2][1]
+
+ for response, new_hist in model.stream_chat(tokenizer, query, hist):
+ output = query + " " + response
+ yield output
diff --git a/model/fastchat/serve/test_message.py b/model/fastchat/serve/test_message.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef2a2e36ec0732d428c8defecad300502a35c05a
--- /dev/null
+++ b/model/fastchat/serve/test_message.py
@@ -0,0 +1,81 @@
+import argparse
+import json
+
+import requests
+
+from fastchat.conversation import (
+ get_default_conv_template,
+ compute_skip_echo_len,
+ SeparatorStyle,
+)
+
+
+def main():
+ model_name = args.model_name
+
+ if args.worker_address:
+ worker_addr = args.worker_address
+ else:
+ controller_addr = args.controller_address
+ ret = requests.post(controller_addr + "/refresh_all_workers")
+ ret = requests.post(controller_addr + "/list_models")
+ models = ret.json()["models"]
+ models.sort()
+ print(f"Models: {models}")
+
+ ret = requests.post(
+ controller_addr + "/get_worker_address", json={"model": model_name}
+ )
+ worker_addr = ret.json()["address"]
+ print(f"worker_addr: {worker_addr}")
+
+ if worker_addr == "":
+ return
+
+ conv = get_default_conv_template(model_name).copy()
+ conv.append_message(conv.roles[0], args.message)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+
+ headers = {"User-Agent": "fastchat Client"}
+ pload = {
+ "model": model_name,
+ "prompt": prompt,
+ "max_new_tokens": args.max_new_tokens,
+ "temperature": args.temperature,
+ "stop": conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2,
+ }
+ response = requests.post(
+ worker_addr + "/worker_generate_stream",
+ headers=headers,
+ json=pload,
+ stream=True,
+ )
+
+ print(f"{conv.roles[0]}: {args.message}")
+ for chunk in response.iter_lines(
+ chunk_size=8192, decode_unicode=False, delimiter=b"\0"
+ ):
+ if chunk:
+ data = json.loads(chunk.decode("utf-8"))
+ skip_echo_len = compute_skip_echo_len(model_name, conv, prompt)
+ output = data["text"][skip_echo_len:].strip()
+ print(f"{conv.roles[1]}: {output}", end="\r")
+ print("")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--controller-address", type=str, default="http://localhost:21001"
+ )
+ parser.add_argument("--worker-address", type=str)
+ parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
+ parser.add_argument("--temperature", type=float, default=0.0)
+ parser.add_argument("--max-new-tokens", type=int, default=32)
+ parser.add_argument(
+ "--message", type=str, default="Tell me a story with more than 1000 words."
+ )
+ args = parser.parse_args()
+
+ main()
diff --git a/model/fastchat/serve/test_throughput.py b/model/fastchat/serve/test_throughput.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cc5f45c7e06deb596b51213cd2667fd8361dbfd
--- /dev/null
+++ b/model/fastchat/serve/test_throughput.py
@@ -0,0 +1,115 @@
+"""Benchmarking script to test the throughput of serving workers."""
+import argparse
+import json
+
+import requests
+import threading
+import time
+
+from fastchat.conversation import default_conversation
+
+
+def main():
+ if args.worker_address:
+ worker_addr = args.worker_address
+ else:
+ controller_addr = args.controller_address
+ ret = requests.post(controller_addr + "/refresh_all_workers")
+ ret = requests.post(controller_addr + "/list_models")
+ models = ret.json()["models"]
+ models.sort()
+ print(f"Models: {models}")
+
+ ret = requests.post(
+ controller_addr + "/get_worker_address", json={"model": args.model_name}
+ )
+ worker_addr = ret.json()["address"]
+ print(f"worker_addr: {worker_addr}")
+
+ if worker_addr == "":
+ return
+
+ conv = default_conversation.copy()
+ conv.append_message(conv.roles[0], "Tell me a story with more than 1000 words")
+ prompt_template = conv.get_prompt()
+ prompts = [prompt_template for _ in range(args.n_thread)]
+
+ headers = {"User-Agent": "fastchat Client"}
+ ploads = [
+ {
+ "model": args.model_name,
+ "prompt": prompts[i],
+ "max_new_tokens": args.max_new_tokens,
+ "temperature": 0.0,
+ # "stop": conv.sep,
+ }
+ for i in range(len(prompts))
+ ]
+
+ def send_request(results, i):
+ if args.test_dispatch:
+ ret = requests.post(
+ controller_addr + "/get_worker_address", json={"model": args.model_name}
+ )
+ thread_worker_addr = ret.json()["address"]
+ else:
+ thread_worker_addr = worker_addr
+ print(f"thread {i} goes to {thread_worker_addr}")
+ response = requests.post(
+ thread_worker_addr + "/worker_generate_stream",
+ headers=headers,
+ json=ploads[i],
+ stream=False,
+ )
+ k = list(
+ response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0")
+ )
+ # print(k)
+ response_new_words = json.loads(k[-2].decode("utf-8"))["text"]
+ error_code = json.loads(k[-2].decode("utf-8"))["error_code"]
+ # print(f"=== Thread {i} ===, words: {1}, error code: {error_code}")
+ results[i] = len(response_new_words.split(" ")) - len(prompts[i].split(" "))
+
+ # use N threads to prompt the backend
+ tik = time.time()
+ threads = []
+ results = [None] * args.n_thread
+ for i in range(args.n_thread):
+ t = threading.Thread(target=send_request, args=(results, i))
+ t.start()
+ # time.sleep(0.5)
+ threads.append(t)
+
+ for t in threads:
+ t.join()
+
+ print(f"Time (POST): {time.time() - tik} s")
+ # n_words = 0
+ # for i, response in enumerate(results):
+ # # print(prompt[i].replace(conv.sep, "\n"), end="")
+ # # make sure the streaming finishes at EOS or stopping criteria
+ # k = list(response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"))
+ # response_new_words = json.loads(k[-2].decode("utf-8"))["text"]
+ # # print(response_new_words)
+ # n_words += len(response_new_words.split(" ")) - len(prompts[i].split(" "))
+ n_words = sum(results)
+ time_seconds = time.time() - tik
+ print(
+ f"Time (Completion): {time_seconds}, n threads: {args.n_thread}, "
+ f"throughput: {n_words / time_seconds} words/s."
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--controller-address", type=str, default="http://localhost:21001"
+ )
+ parser.add_argument("--worker-address", type=str)
+ parser.add_argument("--model-name", type=str, default="vicuna")
+ parser.add_argument("--max-new-tokens", type=int, default=2048)
+ parser.add_argument("--n-thread", type=int, default=8)
+ parser.add_argument("--test-dispatch", action="store_true")
+ args = parser.parse_args()
+
+ main()
diff --git a/model/fastchat/train/llama_flash_attn_monkey_patch.py b/model/fastchat/train/llama_flash_attn_monkey_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..00fc39edff8f3e8b23bc5083e82db162153bb916
--- /dev/null
+++ b/model/fastchat/train/llama_flash_attn_monkey_patch.py
@@ -0,0 +1,114 @@
+from typing import List, Optional, Tuple
+
+import torch
+from torch import nn
+
+import transformers
+from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
+
+from einops import rearrange
+
+from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
+from flash_attn.bert_padding import unpad_input, pad_input
+
+
+def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel
+
+ attention_mask: [bsz, q_len]
+ """
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = (
+ self.q_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+ key_states = (
+ self.k_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+ value_states = (
+ self.v_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+ # [bsz, q_len, nh, hd]
+ # [bsz, nh, q_len, hd]
+
+ kv_seq_len = key_states.shape[-2]
+ assert past_key_value is None, "past_key_value is not supported"
+
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(
+ query_states, key_states, cos, sin, position_ids
+ )
+ # [bsz, nh, t, hd]
+ assert not output_attentions, "output_attentions is not supported"
+ assert not use_cache, "use_cache is not supported"
+
+ # Flash attention codes from
+ # https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attention.py
+
+ # transform the data into the format required by flash attention
+ qkv = torch.stack(
+ [query_states, key_states, value_states], dim=2
+ ) # [bsz, nh, 3, q_len, hd]
+ qkv = qkv.transpose(1, 3) # [bsz, q_len, 3, nh, hd]
+ # We have disabled _prepare_decoder_attention_mask in LlamaModel
+ # the attention_mask should be the same as the key_padding_mask
+ key_padding_mask = attention_mask
+
+ if key_padding_mask is None:
+ qkv = rearrange(qkv, "b s ... -> (b s) ...")
+ max_s = q_len
+ cu_q_lens = torch.arange(
+ 0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device
+ )
+ output = flash_attn_unpadded_qkvpacked_func(
+ qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
+ )
+ output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
+ else:
+ nheads = qkv.shape[-2]
+ x = rearrange(qkv, "b s three h d -> b s (three h d)")
+ x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
+ x_unpad = rearrange(
+ x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads
+ )
+ output_unpad = flash_attn_unpadded_qkvpacked_func(
+ x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
+ )
+ output = rearrange(
+ pad_input(
+ rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len
+ ),
+ "b s (h d) -> b s h d",
+ h=nheads,
+ )
+ return self.o_proj(rearrange(output, "b s h d -> b s (h d)")), None, None
+
+
+# Disable the transformation of the attention mask in LlamaModel as the flash attention
+# requires the attention mask to be the same as the key_padding_mask
+def _prepare_decoder_attention_mask(
+ self, attention_mask, input_shape, inputs_embeds, past_key_values_length
+):
+ # [bsz, seq_len]
+ return attention_mask
+
+
+def replace_llama_attn_with_flash_attn():
+ transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (
+ _prepare_decoder_attention_mask
+ )
+ transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
diff --git a/model/fastchat/train/train.py b/model/fastchat/train/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..485b0f17f89c3eaaca69fe26a61e929dc1c35d6f
--- /dev/null
+++ b/model/fastchat/train/train.py
@@ -0,0 +1,250 @@
+# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
+# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from dataclasses import dataclass, field
+import json
+import pathlib
+from typing import Dict, Optional, Sequence
+
+import torch
+from torch.utils.data import Dataset
+import transformers
+from transformers import Trainer
+from transformers.trainer_pt_utils import LabelSmoother
+
+from fastchat.conversation import get_default_conv_template, SeparatorStyle
+
+IGNORE_TOKEN_ID = LabelSmoother.ignore_index
+
+
+@dataclass
+class ModelArguments:
+ model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
+
+
+@dataclass
+class DataArguments:
+ data_path: str = field(
+ default=None, metadata={"help": "Path to the training data."}
+ )
+ lazy_preprocess: bool = False
+
+
+@dataclass
+class TrainingArguments(transformers.TrainingArguments):
+ cache_dir: Optional[str] = field(default=None)
+ optim: str = field(default="adamw_torch")
+ model_max_length: int = field(
+ default=512,
+ metadata={
+ "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
+ },
+ )
+
+
+local_rank = None
+
+
+def rank0_print(*args):
+ if local_rank == 0:
+ print(*args)
+
+
+def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
+ """Collects the state dict and dump to disk."""
+ state_dict = trainer.model.state_dict()
+ if trainer.args.should_save:
+ cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
+ del state_dict
+ trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
+
+
+def preprocess(
+ sources,
+ tokenizer: transformers.PreTrainedTokenizer,
+) -> Dict:
+ conv = get_default_conv_template("vicuna").copy()
+ roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
+
+ # Apply prompt templates
+ conversations = []
+ for i, source in enumerate(sources):
+ if roles[source[0]["from"]] != conv.roles[0]:
+ # Skip the first one if it is not from human
+ source = source[1:]
+
+ conv.messages = []
+ for j, sentence in enumerate(source):
+ role = roles[sentence["from"]]
+ assert role == conv.roles[j % 2], f"{i}"
+ conv.append_message(role, sentence["value"])
+ conversations.append(conv.get_prompt())
+
+ # Tokenize conversations
+ input_ids = tokenizer(
+ conversations,
+ return_tensors="pt",
+ padding="max_length",
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ ).input_ids
+ targets = input_ids.clone()
+
+ assert conv.sep_style == SeparatorStyle.TWO
+
+ # Mask targets
+ sep = conv.sep + conv.roles[1] + ": "
+ for conversation, target in zip(conversations, targets):
+ total_len = int(target.ne(tokenizer.pad_token_id).sum())
+
+ rounds = conversation.split(conv.sep2)
+ cur_len = 1
+ for i, rou in enumerate(rounds):
+ if rou == "":
+ break
+
+ parts = rou.split(sep)
+ if len(parts) != 2:
+ break
+ parts[0] += sep
+ round_len = len(tokenizer(rou).input_ids)
+ instruction_len = len(tokenizer(parts[0]).input_ids) - 2
+
+ target[cur_len : cur_len + instruction_len] = IGNORE_TOKEN_ID
+
+ # rank0_print(tokenizer.decode(target[cur_len+instruction_len:cur_len+round_len]))
+
+ cur_len += round_len
+ target[cur_len:] = IGNORE_TOKEN_ID
+
+ if cur_len < tokenizer.model_max_length:
+ if cur_len != total_len:
+ rank0_print(
+ f"WARNING: tokenization mismatch " f"{cur_len} vs. {total_len}"
+ )
+
+ return dict(
+ input_ids=input_ids,
+ labels=targets,
+ attention_mask=input_ids.ne(tokenizer.pad_token_id),
+ )
+
+
+class SupervisedDataset(Dataset):
+ """Dataset for supervised fine-tuning."""
+
+ def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer):
+ super(SupervisedDataset, self).__init__()
+ rank0_print("Loading data...")
+ list_data_dict = json.load(open(data_path, "r"))
+
+ rank0_print("Formatting inputs...")
+ sources = [example["conversations"] for example in list_data_dict]
+ data_dict = preprocess(sources, tokenizer)
+
+ self.input_ids = data_dict["input_ids"]
+ self.labels = data_dict["labels"]
+ self.attention_mask = data_dict["attention_mask"]
+
+ def __len__(self):
+ return len(self.input_ids)
+
+ def __getitem__(self, i) -> Dict[str, torch.Tensor]:
+ return dict(
+ input_ids=self.input_ids[i],
+ labels=self.labels[i],
+ attention_mask=self.attention_mask[i],
+ )
+
+
+class LazySupervisedDataset(Dataset):
+ """Dataset for supervised fine-tuning."""
+
+ def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer):
+ super(LazySupervisedDataset, self).__init__()
+ self.tokenizer = tokenizer
+
+ rank0_print("Loading data...")
+ list_data_dict = json.load(open(data_path, "r"))
+
+ rank0_print("Formatting inputs...Skip in lazy mode")
+ self.tokenizer = tokenizer
+ self.list_data_dict = list_data_dict
+
+ def __len__(self):
+ return len(self.list_data_dict)
+
+ def __getitem__(self, i) -> Dict[str, torch.Tensor]:
+ sources = self.list_data_dict[i]
+ if isinstance(i, int):
+ sources = [sources]
+ data_dict = preprocess([e["conversations"] for e in sources], self.tokenizer)
+ if isinstance(i, int):
+ data_dict = dict(
+ input_ids=data_dict["input_ids"][0],
+ labels=data_dict["labels"][0],
+ attention_mask=data_dict["attention_mask"][0],
+ )
+ return data_dict
+
+
+def make_supervised_data_module(
+ tokenizer: transformers.PreTrainedTokenizer, data_args
+) -> Dict:
+ """Make dataset and collator for supervised fine-tuning."""
+ dataset_cls = (
+ LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
+ )
+ train_dataset = dataset_cls(tokenizer=tokenizer, data_path=data_args.data_path)
+ return dict(train_dataset=train_dataset, eval_dataset=None)
+
+
+def train():
+ global local_rank
+
+ parser = transformers.HfArgumentParser(
+ (ModelArguments, DataArguments, TrainingArguments)
+ )
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+ local_rank = training_args.local_rank
+ model = transformers.AutoModelForCausalLM.from_pretrained(
+ model_args.model_name_or_path,
+ cache_dir=training_args.cache_dir,
+ )
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ model_args.model_name_or_path,
+ cache_dir=training_args.cache_dir,
+ model_max_length=training_args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+
+ data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
+ trainer = Trainer(
+ model=model, tokenizer=tokenizer, args=training_args, **data_module
+ )
+
+ if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
+ trainer.train(resume_from_checkpoint=True)
+ else:
+ trainer.train()
+ trainer.save_state()
+ safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
+
+
+if __name__ == "__main__":
+ train()
diff --git a/model/fastchat/train/train_lora.py b/model/fastchat/train/train_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..273df0232e981859ab189613ad0f875e2c88c6cf
--- /dev/null
+++ b/model/fastchat/train/train_lora.py
@@ -0,0 +1,151 @@
+# Usage: deepspeed train_lora.py --deepspeed <$PATH_TO_DEEPSPEED_CONFIG>
+
+# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
+# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass, field
+import logging
+import pathlib
+import typing
+
+from deepspeed import zero
+from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
+from peft import LoraConfig, get_peft_model
+import transformers
+from transformers import Trainer
+
+from fastchat.train.train import (
+ DataArguments,
+ ModelArguments,
+ TrainingArguments,
+ make_supervised_data_module,
+)
+
+from fastchat.train.llama_flash_attn_monkey_patch import (
+ replace_llama_attn_with_flash_attn,
+)
+
+replace_llama_attn_with_flash_attn()
+
+
+@dataclass
+class LoraArguments:
+ lora_r: int = 8
+ lora_alpha: int = 16
+ lora_dropout: float = 0.05
+ lora_target_modules: typing.List[str] = field(
+ default_factory=lambda: ["q_proj", "v_proj"]
+ )
+ lora_weight_path: str = ""
+ bias: str = "none"
+
+
+def maybe_zero_3(param):
+ if hasattr(param, "ds_id"):
+ assert param.ds_status == ZeroParamStatus.NOT_AVAILABLE
+ with zero.GatheredParameters([param]):
+ param = param.data.cpu().clone().detach()
+ return param
+
+
+# Borrowed from peft.utils.get_peft_model_state_dict
+def get_peft_state_maybe_zero_3(state_dict, bias):
+ if bias == "none":
+ to_return = {
+ k: state_dict[k].cpu().clone().detach() for k in state_dict if "lora_" in k
+ }
+ elif bias == "all":
+ to_return = {
+ k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k
+ }
+ elif bias == "lora_only":
+ to_return = {}
+ for k in state_dict:
+ if "lora_" in k:
+ to_return[k] = state_dict[k]
+ bias_name = k.split("lora_")[0] + "bias"
+ if bias_name in state_dict:
+ to_return[bias_name] = state_dict[bias_name]
+ else:
+ raise NotImplementedError
+ to_return = {k: maybe_zero_3(v) for k, v in to_return.items()}
+ return to_return
+
+
+def train():
+ parser = transformers.HfArgumentParser(
+ (ModelArguments, DataArguments, TrainingArguments, LoraArguments)
+ )
+ (
+ model_args,
+ data_args,
+ training_args,
+ lora_args,
+ ) = parser.parse_args_into_dataclasses()
+
+ model = transformers.AutoModelForCausalLM.from_pretrained(
+ model_args.model_name_or_path,
+ cache_dir=training_args.cache_dir,
+ )
+ lora_config = LoraConfig(
+ r=lora_args.lora_r,
+ lora_alpha=lora_args.lora_alpha,
+ target_modules=lora_args.lora_target_modules,
+ lora_dropout=lora_args.lora_dropout,
+ bias=lora_args.bias,
+ task_type="CAUSAL_LM",
+ )
+ model = get_peft_model(model, lora_config)
+ if training_args.deepspeed is not None and training_args.local_rank == 0:
+ model.print_trainable_parameters()
+
+ if training_args.gradient_checkpointing:
+ logging.warning(
+ "gradient checkpointing with lora makes requires_grad "
+ "incorrect and needs a monkey patch in Trainer or the "
+ "wrapped model's forward. ref: "
+ "https://github.com/lm-sys/FastChat/pull/138#issuecomment-1509172198"
+ )
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ model_args.model_name_or_path,
+ cache_dir=training_args.cache_dir,
+ model_max_length=training_args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+
+ data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
+ trainer = Trainer(
+ model=model, tokenizer=tokenizer, args=training_args, **data_module
+ )
+
+ model.config.use_cache = False
+
+ if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
+ trainer.train(resume_from_checkpoint=True)
+ else:
+ trainer.train()
+ trainer.save_state()
+
+ # Save states. Weights might be a placeholder in zero3 and need a gather
+ state_dict = get_peft_state_maybe_zero_3(model.state_dict(), lora_args.bias)
+ if training_args.local_rank == 0:
+ model.save_pretrained(training_args.output_dir, state_dict=state_dict)
+
+
+if __name__ == "__main__":
+ train()
diff --git a/model/fastchat/train/train_mem.py b/model/fastchat/train/train_mem.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4b33528482f160a10720c8cc02df451c905b3bd
--- /dev/null
+++ b/model/fastchat/train/train_mem.py
@@ -0,0 +1,13 @@
+# Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.
+
+# Need to call this before importing transformers.
+from fastchat.train.llama_flash_attn_monkey_patch import (
+ replace_llama_attn_with_flash_attn,
+)
+
+replace_llama_attn_with_flash_attn()
+
+from fastchat.train.train import train
+
+if __name__ == "__main__":
+ train()
diff --git a/model/fastchat/utils.py b/model/fastchat/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c74e102b5bc428bc7d5c507ad13030c84881ad6
--- /dev/null
+++ b/model/fastchat/utils.py
@@ -0,0 +1,159 @@
+import datetime
+import logging
+import logging.handlers
+import os
+import sys
+import json
+
+import requests
+import torch
+
+from fastchat.constants import LOGDIR
+
+server_error_msg = (
+ "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
+)
+moderation_msg = (
+ "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."
+)
+
+handler = None
+
+
+def build_logger(logger_name, logger_filename):
+ global handler
+
+ formatter = logging.Formatter(
+ fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ )
+
+ # Set the format of root handlers
+ if not logging.getLogger().handlers:
+ logging.basicConfig(level=logging.INFO, encoding="utf-8")
+ logging.getLogger().handlers[0].setFormatter(formatter)
+
+ # Redirect stdout and stderr to loggers
+ stdout_logger = logging.getLogger("stdout")
+ stdout_logger.setLevel(logging.INFO)
+ sl = StreamToLogger(stdout_logger, logging.INFO)
+ sys.stdout = sl
+
+ stderr_logger = logging.getLogger("stderr")
+ stderr_logger.setLevel(logging.ERROR)
+ sl = StreamToLogger(stderr_logger, logging.ERROR)
+ sys.stderr = sl
+
+ # Get logger
+ logger = logging.getLogger(logger_name)
+ logger.setLevel(logging.INFO)
+
+ # Add a file handler for all loggers
+ if handler is None:
+ os.makedirs(LOGDIR, exist_ok=True)
+ filename = os.path.join(LOGDIR, logger_filename)
+ handler = logging.handlers.TimedRotatingFileHandler(
+ filename, when="D", utc=True
+ )
+ handler.setFormatter(formatter)
+
+ for name, item in logging.root.manager.loggerDict.items():
+ if isinstance(item, logging.Logger):
+ item.addHandler(handler)
+
+ return logger
+
+
+class StreamToLogger(object):
+ """
+ Fake file-like stream object that redirects writes to a logger instance.
+ """
+
+ def __init__(self, logger, log_level=logging.INFO):
+ self.terminal = sys.stdout
+ self.logger = logger
+ self.log_level = log_level
+ self.linebuf = ""
+
+ def __getattr__(self, attr):
+ return getattr(self.terminal, attr)
+
+ def write(self, buf):
+ temp_linebuf = self.linebuf + buf
+ self.linebuf = ""
+ for line in temp_linebuf.splitlines(True):
+ # From the io.TextIOWrapper docs:
+ # On output, if newline is None, any '\n' characters written
+ # are translated to the system default line separator.
+ # By default sys.stdout.write() expects '\n' newlines and then
+ # translates them so this is still cross platform.
+ if line[-1] == "\n":
+ encoded_message = line.encode("utf-8", "ignore").decode("utf-8")
+ self.logger.log(self.log_level, encoded_message.rstrip())
+ else:
+ self.linebuf += line
+
+ def flush(self):
+ if self.linebuf != "":
+ encoded_message = self.linebuf.encode("utf-8", "ignore").decode("utf-8")
+ self.logger.log(self.log_level, encoded_message.rstrip())
+ self.linebuf = ""
+
+
+def disable_torch_init():
+ """
+ Disable the redundant torch default initialization to accelerate model creation.
+ """
+ import torch
+
+ setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
+ setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
+
+
+def violates_moderation(text):
+ """
+ Check whether the text violates OpenAI moderation API.
+ """
+ url = "https://api.openai.com/v1/moderations"
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"],
+ }
+ text = text.replace("\n", "")
+ data = "{" + '"input": ' + f'"{text}"' + "}"
+ data = data.encode("utf-8")
+ try:
+ ret = requests.post(url, headers=headers, data=data, timeout=5)
+ flagged = ret.json()["results"][0]["flagged"]
+ except requests.exceptions.RequestException as e:
+ flagged = False
+ except KeyError as e:
+ flagged = False
+
+ return flagged
+
+
+# Flan-t5 trained with HF+FSDP saves corrupted weights for shared embeddings,
+# Use this function to make sure it can be correctly loaded.
+def clean_flant5_ckpt(ckpt_path):
+ index_file = os.path.join(ckpt_path, "pytorch_model.bin.index.json")
+ index_json = json.load(open(index_file, "r"))
+
+ weightmap = index_json["weight_map"]
+
+ share_weight_file = weightmap["shared.weight"]
+ share_weight = torch.load(os.path.join(ckpt_path, share_weight_file))[
+ "shared.weight"
+ ]
+
+ for weight_name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]:
+ weight_file = weightmap[weight_name]
+ weight = torch.load(os.path.join(ckpt_path, weight_file))
+ weight[weight_name] = share_weight
+ torch.save(weight, os.path.join(ckpt_path, weight_file))
+
+
+def pretty_print_semaphore(semaphore):
+ if semaphore is None:
+ return "None"
+ return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"
diff --git a/model/summary/TextSummarizer.py b/model/summary/TextSummarizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d0d37a69dce5ad0f209c970e091d671f29de364
--- /dev/null
+++ b/model/summary/TextSummarizer.py
@@ -0,0 +1,99 @@
+import torch
+from transformers import T5ForConditionalGeneration, T5TokenizerFast
+
+
+class TextSummarizer:
+
+ def __init__(self, device='cuda'):
+
+ self._load_model(
+ model_type="t5",
+ model_dir=
+ "./pretrained_models/flan-t5-large-finetuned-openai-summarize_from_feedback",
+ device=device)
+
+ def _load_model(self,
+ model_type: str = "t5",
+ model_dir: str = "outputs",
+ device: str = 'cuda'):
+ """
+ loads a checkpoint for inferencing/prediction
+ Args:
+ model_type (str, optional): "t5" or "mt5". Defaults to "t5".
+ model_dir (str, optional): path to model directory.
+ Defaults to "outputs".
+ device (str, optional): device to run. Defaults to "cuda".
+ """
+ if model_type == "t5":
+ self.model = T5ForConditionalGeneration.from_pretrained(
+ f"{model_dir}")
+ self.tokenizer = T5TokenizerFast.from_pretrained(f"{model_dir}")
+ else:
+ raise NotImplementedError(
+ f"model_type {model_type} not implemented")
+
+ self.device = torch.device(device)
+
+ self.model = self.model.to(self.device)
+
+ def _predict(
+ self,
+ source_text: str,
+ max_length: int = 512,
+ num_return_sequences: int = 1,
+ num_beams: int = 2,
+ top_k: int = 50,
+ top_p: float = 0.95,
+ do_sample: bool = True,
+ repetition_penalty: float = 2.5,
+ length_penalty: float = 1.0,
+ early_stopping: bool = True,
+ skip_special_tokens: bool = True,
+ clean_up_tokenization_spaces: bool = True,
+ ):
+ """
+ generates prediction for T5/MT5 model
+ Args:
+ source_text (str): any text for generating predictions
+ max_length (int, optional): max token length of prediction.
+ Defaults to 512.
+ num_return_sequences (int, optional): number of predictions to be
+ returned. Defaults to 1.
+ num_beams (int, optional): number of beams. Defaults to 2.
+ top_k (int, optional): Defaults to 50.
+ top_p (float, optional): Defaults to 0.95.
+ do_sample (bool, optional): Defaults to True.
+ repetition_penalty (float, optional): Defaults to 2.5.
+ length_penalty (float, optional): Defaults to 1.0.
+ early_stopping (bool, optional): Defaults to True.
+ skip_special_tokens (bool, optional): Defaults to True.
+ clean_up_tokenization_spaces (bool, optional): Defaults to True.
+ Returns:
+ list[str]: returns predictions
+ """
+ input_ids = self.tokenizer.encode(
+ source_text, return_tensors="pt", add_special_tokens=True)
+ input_ids = input_ids.to(self.device)
+ generated_ids = self.model.generate(
+ input_ids=input_ids,
+ num_beams=num_beams,
+ max_length=max_length,
+ repetition_penalty=repetition_penalty,
+ length_penalty=length_penalty,
+ early_stopping=early_stopping,
+ top_p=top_p,
+ top_k=top_k,
+ num_return_sequences=num_return_sequences,
+ )
+ preds = [
+ self.tokenizer.decode(
+ g,
+ skip_special_tokens=skip_special_tokens,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ ) for g in generated_ids
+ ]
+ return preds
+
+ def __call__(self, source_text):
+ generated_text = self._predict(source_text=source_text)
+ return generated_text
diff --git a/model/summary/__init__.py b/model/summary/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..434d199a9b30a69be239b433aa888df4502fcf56
--- /dev/null
+++ b/model/summary/__init__.py
@@ -0,0 +1 @@
+from .TextSummarizer import TextSummarizer
\ No newline at end of file
diff --git a/model/utils/__init__.py b/model/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b56dd0568b35b95a259ed1975526b6d6774002c6
--- /dev/null
+++ b/model/utils/__init__.py
@@ -0,0 +1,3 @@
+from .extract_clip_feature import extract_clip_feature_single_video_fps
+from .generate_tf_record import generate
+from .scenic_call import ScenicCall, ScenicModel
\ No newline at end of file
diff --git a/model/utils/extract_clip_feature.py b/model/utils/extract_clip_feature.py
new file mode 100644
index 0000000000000000000000000000000000000000..a980bd90e29afc0ed8925a61565f65a7771c81bb
--- /dev/null
+++ b/model/utils/extract_clip_feature.py
@@ -0,0 +1,121 @@
+import clip
+import numpy as np
+import torch
+from mmaction.datasets.transforms import (CenterCrop, DecordDecode, DecordInit,
+ FormatShape, Resize)
+from torchvision import transforms
+
+
+def extract_clip_feature_single_video_fps(
+ video_path: str,
+ clip_ckpt_path: str = 'ViT-L-14.pt',
+ device: str = 'cuda'):
+
+ class SampleFrames1FPS(object):
+ '''Sample frames at 1 fps.
+
+ Required Keys:
+ - total_frames
+ - start_index
+ - avg_fps
+
+ Added Keys:
+ - frame_interval
+ - frame_inds
+ - num_clips
+ '''
+
+ def transform(self, video_info: dict) -> dict:
+ video_info['frame_inds'] = np.arange(
+ video_info['start_index'],
+ video_info['total_frames'],
+ video_info['avg_fps'],
+ dtype=int) # np.arange(start, stop, step, dtype)
+ video_info['frame_interval'] = 1
+ video_info['num_clips'] = len(video_info['frame_inds'])
+ return video_info
+
+ class SampleFrames5FPS(object):
+ '''Sample frames at 5 fps.
+
+ Required Keys:
+ - total_frames
+ - start_index
+ - avg_fps
+
+ Added Keys:
+ - frame_interval
+ - frame_inds
+ - num_clips
+ '''
+
+ def transform(self, video_info: dict) -> dict:
+ video_info['frame_inds'] = np.arange(
+ video_info['start_index'],
+ video_info['total_frames'],
+ video_info['avg_fps'] // 5,
+ dtype=int)
+ video_info['frame_interval'] = 1
+ video_info['num_clips'] = len(video_info['frame_inds'])
+ return video_info
+
+ video_info = {'filename': video_path, 'start_index': 0}
+ video_processors = [
+ DecordInit(),
+ SampleFrames1FPS(),
+ DecordDecode(),
+ Resize(scale=(-1, 224)),
+ CenterCrop(crop_size=224),
+ FormatShape(input_format='NCHW'),
+ ]
+
+ # decode video to imgs
+ for processor in video_processors:
+ video_info = processor.transform(video_info)
+
+ imgs = torch.from_numpy(video_info['imgs']) # uint8 img tensor
+
+ imgs_transforms = transforms.Compose([
+ transforms.ConvertImageDtype(dtype=torch.float32),
+ transforms.Normalize(
+ mean=(0.48145466, 0.4578275, 0.40821073),
+ std=(0.26862954, 0.26130258, 0.27577711),
+ inplace=False)
+ ])
+
+ # uint8 -> float, then normalize
+ imgs = imgs_transforms(imgs).to(device)
+
+ # load model
+ clip_model, _ = clip.load(clip_ckpt_path, device)
+
+ # encode imgs get features
+ with torch.no_grad():
+ video_feat = clip_model.encode_image(imgs)
+
+ return video_feat, video_info
+
+
+if __name__ == '__main__':
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ video_names = [
+ 'cook.mp4', 'latex.mp4', 'nba.mp4', 'temple_of_heaven.mp4',
+ 'south_pole.mp4', 'tv_series.mp4', 'formula_one.mp4', 'make-up.mp4',
+ 'police.mp4'
+ ]
+ video_dir = '/mnt/petrelfs/wangyiqin/vid_cap/examples/videos/'
+
+ for video_name in video_names:
+ video_feat = extract_clip_feature_single_video_fps(
+ video_path=video_dir + video_name,
+ clip_ckpt_path='ViT-L-14.pt',
+ device=device)
+ video_feat = video_feat.cpu()
+ # compress to one dimension
+ video_feat = video_feat.numpy()
+
+ np.save('clip_features/20/' + video_name[:-4] + '.npy', video_feat)
+ print(video_feat.shape)
+ print(video_name + ' DONE')
diff --git a/model/utils/generate_tf_record.py b/model/utils/generate_tf_record.py
new file mode 100644
index 0000000000000000000000000000000000000000..881a91935a3a7980215d6b96a4ab1fcf599277cf
--- /dev/null
+++ b/model/utils/generate_tf_record.py
@@ -0,0 +1,278 @@
+# Copyright 2021 DeepMind Technologies Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Python script to generate TFRecords of SequenceExample from csv."""
+
+import contextlib
+import math
+import os
+from typing import Optional, Sequence
+
+from absl import app
+from absl import flags
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+from tqdm import tqdm
+
+flags.DEFINE_string("csv_path", None, "Input csv")
+flags.DEFINE_string("output_path", None, "Tfrecords output path.")
+flags.DEFINE_string(
+ "features_path",
+ None,
+ "In case features are stored in individual files and not in the csv.",
+)
+flags.DEFINE_integer(
+ "num_shards",
+ -1,
+ (
+ "Number of shards to output, -1 means"
+ "it will automatically adapt to the sqrt(num_examples)."
+ ),
+)
+flags.DEFINE_bool("shuffle_csv", False, "Whether or not to shuffle the csv.")
+FLAGS = flags.FLAGS
+
+
+@contextlib.contextmanager
+def _close_on_exit(writers):
+ """Call close on all writers on exit."""
+ try:
+ yield writers
+ finally:
+ for writer in writers:
+ writer.close()
+
+
+def add_float_list(key: str, values: Sequence[float],
+ sequence: tf.train.SequenceExample):
+ sequence.feature_lists.feature_list[key].feature.add(
+ ).float_list.value[:] = values
+
+
+def add_bytes_list(key: str, values: Sequence[bytes],
+ sequence: tf.train.SequenceExample):
+ sequence.feature_lists.feature_list[key].feature.add(
+ ).bytes_list.value[:] = values
+
+
+def add_int_list(key: str, values: Sequence[int],
+ sequence: tf.train.SequenceExample):
+ sequence.feature_lists.feature_list[key].feature.add(
+ ).int64_list.value[:] = values
+
+
+def set_context_int_list(key: str, value: Sequence[int],
+ sequence: tf.train.SequenceExample):
+ sequence.context.feature[key].int64_list.value[:] = value
+
+
+def set_context_bytes(key: str, value: bytes,
+ sequence: tf.train.SequenceExample):
+ sequence.context.feature[key].bytes_list.value[:] = (value,)
+
+
+def set_context_float(key: str, value: float,
+ sequence: tf.train.SequenceExample):
+ sequence.context.feature[key].float_list.value[:] = (value,)
+
+
+def set_context_int(key: str, value: int, sequence: tf.train.SequenceExample):
+ sequence.context.feature[key].int64_list.value[:] = (value,)
+
+
+def generate_sequence_example(video_id: str,
+ start: Optional[Sequence[float]],
+ end: Optional[Sequence[float]],
+ caption: Optional[Sequence[str]],
+ asr_start: Sequence[float],
+ asr_end: Sequence[float],
+ asr_string: Sequence[str],
+ features: Sequence[Sequence[float]],
+ duration: int,
+ split: Sequence[int] = None):
+ """Generate a sequence example."""
+
+ # Initiate the sequence example.
+ seq_example = tf.train.SequenceExample()
+
+ # Add dense captioning annotations if these exist.
+ if caption is not None:
+ for s, e, c in zip(start, end, caption):
+ seq_example.context.feature[
+ "video/timestamps/start"
+ ].int64_list.value.append(s)
+ seq_example.context.feature[
+ "video/timestamps/end"
+ ].int64_list.value.append(e)
+ seq_example.context.feature["caption/string"].bytes_list.value.append(
+ c.encode()
+ )
+
+ # Add ASR.
+ if asr_start:
+ for s, e, c in zip(asr_start, asr_end, asr_string):
+ seq_example.context.feature[
+ "ASR/timestamps/start"
+ ].int64_list.value.append(s)
+ seq_example.context.feature["ASR/timestamps/end"].int64_list.value.append(
+ e
+ )
+ seq_example.context.feature["ASR/string"].bytes_list.value.append(
+ c.encode()
+ )
+
+ # Add visual features.
+ for f in features:
+ add_float_list("image/clip_embeddings", f, seq_example)
+
+ if split is not None:
+ for s in split:
+ seq_example.context.feature["split"].int64_list.value.append(s)
+
+ # Add other metadata.
+ set_context_bytes("videoid", video_id.encode(), seq_example)
+ set_context_int("video/duration", duration, seq_example)
+ return seq_example
+
+def generate(video_info):
+ # reads the input csv.
+ # input_csv = pd.read_csv(FLAGS.csv_path)
+ # if FLAGS.num_shards == -1:
+ # num_shards = int(math.sqrt(len(video_info)))
+ # else:
+ # num_shards = FLAGS.num_shards
+ num_shards = 1
+ # Set up the TFRecordWriters.
+ # basename = os.path.splitext(os.path.basename(FLAGS.csv_path))[0]
+ basename = video_info['basename']
+ shard_names = [
+ os.path.join(video_info['output_path'], f"{basename}-{i:05d}-of-{num_shards:05d}")
+ for i in range(num_shards)
+ ]
+ writers = [tf.io.TFRecordWriter(shard_name) for shard_name in shard_names]
+
+ with _close_on_exit(writers) as writers:
+ for i in tqdm(range(len(video_info))):
+ print(
+ "Processing example %d of %d (%d%%) \r" %
+ (i, len(video_info), i * 100 / len(video_info)),
+ end="")
+ # no gds needed
+ start = None
+ end = None
+ caption = None
+
+ asr_start = video_info["asr_start"]
+ if isinstance(asr_start, str):
+ asr_start = eval(asr_start) # pylint:disable=eval-used
+ asr_end = video_info["asr_end"]
+ if isinstance(asr_end, str):
+ asr_end = eval(asr_end) # pylint:disable=eval-used
+ asr_string = video_info["asr_string"]
+ if isinstance(asr_string, str):
+ asr_string = eval(asr_string) # pylint:disable=eval-used
+ video_id = video_info["video_id"]
+ split = None
+ # pylint:disable=eval-used
+ if "features" not in video_info: # load on the fly
+ assert video_info['features_path']
+ features = list(
+ np.load(os.path.join(video_info['features_path'], video_id + ".npy"))
+ )
+ else:
+ features = video_info["features"] # pylint:disable=eval-used
+ duration = int(video_info["duration"])
+ seq_ex = generate_sequence_example(
+ video_id,
+ start,
+ end,
+ caption,
+ asr_start,
+ asr_end,
+ asr_string,
+ features,
+ duration,
+ split)
+ writers[i % len(writers)].write(seq_ex.SerializeToString())
+
+def main(*args):
+ # reads the input csv.
+ input_csv = pd.read_csv(FLAGS.csv_path)
+ if FLAGS.num_shards == -1:
+ num_shards = int(math.sqrt(len(input_csv)))
+ else:
+ num_shards = FLAGS.num_shards
+ # Set up the TFRecordWriters.
+ basename = os.path.splitext(os.path.basename(FLAGS.csv_path))[0]
+ shard_names = [
+ os.path.join(FLAGS.output_path, f"{basename}-{i:05d}-of-{num_shards:05d}")
+ for i in range(num_shards)
+ ]
+ writers = [tf.io.TFRecordWriter(shard_name) for shard_name in shard_names]
+
+ if FLAGS.shuffle_csv:
+ input_csv = input_csv.sample(frac=1)
+ with _close_on_exit(writers) as writers:
+ for i in tqdm(range(len(input_csv))):
+ print(
+ "Processing example %d of %d (%d%%) \r" %
+ (i, len(input_csv), i * 100 / len(input_csv)),
+ end="")
+ if "caption" in input_csv:
+ start = eval(input_csv["start"].values[i]) # pylint:disable=eval-used
+ end = eval(input_csv["end"].values[i]) # pylint:disable=eval-used
+ caption = eval(input_csv["caption"].values[i]) # pylint:disable=eval-used
+ else:
+ start = None
+ end = None
+ caption = None
+ asr_start = input_csv["asr_start"].values[i]
+ if isinstance(asr_start, str):
+ asr_start = eval(asr_start) # pylint:disable=eval-used
+ asr_end = input_csv["asr_end"].values[i]
+ if isinstance(asr_end, str):
+ asr_end = eval(asr_end) # pylint:disable=eval-used
+ asr_string = input_csv["asr_string"].values[i]
+ if isinstance(asr_string, str):
+ asr_string = eval(asr_string) # pylint:disable=eval-used
+ video_id = input_csv["video_id"].values[i]
+ split = None
+ if "split" in input_csv:
+ split = input_csv["split"].values[i]
+ if isinstance(split, str):
+ split = eval(split) # pylint:disable=eval-used
+ if "features" not in input_csv: # load on the fly
+ assert FLAGS.features_path
+ features = list(
+ np.load(os.path.join(FLAGS.features_path, video_id + ".npy"))
+ )
+ else:
+ features = eval(input_csv["features"].values[i]) # pylint:disable=eval-used
+ duration = int(input_csv["duration"].values[i])
+ seq_ex = generate_sequence_example(
+ video_id,
+ start,
+ end,
+ caption,
+ asr_start,
+ asr_end,
+ asr_string,
+ features,
+ duration,
+ split)
+ writers[i % len(writers)].write(seq_ex.SerializeToString())
+
+
+if __name__ == "__main__":
+ app.run(main)
diff --git a/model/utils/scenic_call.py b/model/utils/scenic_call.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6e581094fd7a8dfec6602386dbc63abd5d80488
--- /dev/null
+++ b/model/utils/scenic_call.py
@@ -0,0 +1,268 @@
+import functools
+
+from absl import app
+from absl import flags
+from absl import logging
+
+from clu import metric_writers
+from clu import platform
+import flax.linen as nn
+import jax
+from ml_collections import config_flags
+import tensorflow as tf
+
+import sys, os
+from pathlib import Path
+# append current path to sys.path
+sys.path.append(str(Path(__file__).parent.parent.parent / "scenic"))
+
+import logging
+import flax
+from flax import jax_utils
+from flax.training import checkpoints
+from scenic.projects.vid2seq import models, trainer
+from scenic.train_lib_deprecated import train_utils
+from scenic import app
+import ml_collections
+import numpy as np
+import jax.numpy as jnp
+from clu import metric_writers
+from scenic.projects.vid2seq.datasets.dense_video_captioning_tfrecord_dataset import get_datasets
+from scenic.projects.vid2seq import dvc_eval
+
+MAX_CAPTION_STR_LEN = 200
+MAX_KEY_STR_LEN = 400
+
+class ScenicModel:
+ def __init__(self, flags):
+ self.FLAGS = flags
+ jax.config.config_with_absl()
+ run = (functools.partial(self._run_main, main=self._init_model))
+ run(self._init_model)
+ def _run_main(self, argv, *, main):
+ """Runs the `main` method after some initial setup."""
+ del argv
+ # Hide any GPUs form TensorFlow. Otherwise, TF might reserve memory and make
+ # it unavailable to JAX.
+ tf.config.experimental.set_visible_devices([], 'GPU')
+
+ # Enable wrapping of all module calls in a named_call for easier profiling:
+ nn.enable_named_call()
+
+ logging.info('JAX host: %d / %d', jax.process_index(), jax.process_count())
+ logging.info('JAX devices: %r', jax.devices())
+
+ # Add a note so that we can tell which task is which JAX host.
+ # (task 0 is not guaranteed to be the host 0)
+ platform.work_unit().set_task_status(
+ f'host_id: {jax.process_index()}, host_count: {jax.process_count()}')
+ if jax.process_index() == 0:
+ platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
+ self.FLAGS.workdir, 'Workdir')
+ self.FLAGS.config.dataset_configs.base_dir = self.FLAGS.data_dir
+ self.FLAGS.config.init_from.checkpoint_path = self.FLAGS.ckpt_dir
+ rng = jax.random.PRNGKey(self.FLAGS.config.rng_seed)
+ logging.info('RNG: %s', rng)
+
+ writer = metric_writers.create_default_writer(
+ self.FLAGS.workdir, just_logging=jax.process_index() > 0, asynchronous=True)
+
+ return main(rng=rng, config=self.FLAGS.config, workdir=self.FLAGS.workdir, writer=writer)
+
+
+ def _init_model(self, rng: jnp.ndarray, config: ml_collections.ConfigDict, workdir: str,
+ writer: metric_writers.MetricWriter):
+ data_rng, rng = jax.random.split(rng)
+ dataset_dict = get_datasets(config, data_rng=data_rng)
+
+ datasets_metadata = {
+ name: ds.meta_data
+ for name, ds in dataset_dict.items()
+ }
+ all_datasets = []
+ all_datasets_num_train_examples = []
+ for name, metadata in datasets_metadata.items():
+ all_datasets.append(name)
+ all_datasets_num_train_examples.append(
+ metadata.get('num_train_examples', 0))
+ dataset = dataset_dict[all_datasets[0]]
+
+ model_cls = models.DenseVideoCaptioningModel
+ model = model_cls(config, dataset.meta_data)
+ train_state, start_step = trainer.init_state(model, dataset, config,
+ workdir, rng)
+
+ self.train_state = jax_utils.replicate(train_state)
+ logging.info('Number of processes is %s', jax.process_count())
+ del rng
+
+ import functools
+ self.infer_step_pmapped = jax.pmap(
+ functools.partial(
+ trainer.infer_step,
+ model=model,
+ config=config,
+ debug=config.debug_eval),
+ axis_name='batch',
+ )
+
+ self.tokenizer = trainer.get_tokenizer(config)
+ # dsname = 'validation'
+ # self.iterator = dataset.valid_iter[dsname]
+
+ self.config = config
+ self.data_rng = data_rng
+
+ def __call__(self, data_dir=None):
+ # self.FLAGS.config.dataset_configs.base_dir = data_dir
+ dataset_dict = get_datasets(self.config, data_rng=self.data_rng)
+ self.iterator = dataset_dict["youcook"].valid_iter['validation']
+ batch = next(self.iterator)
+
+ train_state = train_utils.sync_model_state_across_replicas(self.train_state)
+ eval_packs = {}
+ keys = []
+ eval_pack = {
+ 'gts':
+ dvc_eval.convert_strings_to_uint8_arrays(
+ batch['caption_strings'], MAX_CAPTION_STR_LEN),
+ 'key':
+ dvc_eval.convert_strings_to_uint8_arrays(
+ batch['videoid'], MAX_KEY_STR_LEN),
+ 'batch_mask':
+ batch['batch_mask'],
+ 'duration':
+ batch['duration'],
+ 'gts_start':
+ batch['timestamp_start'],
+ 'gts_end':
+ batch['timestamp_end'],
+ 'split':
+ batch['split'] if 'split' in batch else
+ np.ones_like(batch['timestamp_start']),
+ }
+ to_del = ['caption_strings', 'key', 'videoid', 'timestamp_start',
+ 'timestamp_end', 'split'] # 'duration',
+ for x in to_del:
+ if x in batch:
+ del batch[x]
+
+ # import pdb
+ # pdb.set_trace()
+
+ _, preds = self.infer_step_pmapped(train_state, batch) #model, config)
+ # import pdb
+ # pdb.set_trace()
+ eval_pack['pred'] = preds
+ eval_pack = jax.tree_map(
+ lambda x: x.reshape((np.prod(x.shape[:2]),) + x.shape[2:]), eval_pack)
+
+ vocabulary_size = self.config.dataset_configs.vocabulary_size
+ # pred_text = trainer.decode_tokens(preds, tokenizer, vocabulary_size)
+
+ # print(preds, pred_text)
+ format_outputs = []
+ for i, valid in enumerate(eval_pack['batch_mask']):
+ print("===============video[", str(0), "]====================")
+ if valid:
+ key = dvc_eval.convert_uint8_array_to_string(eval_pack['key'][i])
+ if key in eval_packs: # redundant video
+ continue
+ keys.append(key)
+
+ pred, pred_timestamps = [], []
+ # get indexes in the predicted seq that delimit the pred segments
+ indexes = [
+ j for j in range(len(eval_pack['pred'][i]) - 1)
+ if eval_pack['pred'][i][j] >= vocabulary_size and
+ eval_pack['pred'][i][j + 1] >= vocabulary_size
+ ] # pylint: disable=g-complex-comprehension
+
+ last_processed = -2
+ order = self.config.dataset_configs.order
+
+ # iterate over predicted segments and decode them
+ for j in range(len(indexes)):
+ if indexes[j] == last_processed + 1: # 3 timestamps != 2 events
+ continue
+
+ # get predicted tokens and transform to string
+ if order == 'ld':
+ start_idx = indexes[j] + 2
+ end_idx = indexes[j + 1] if j < len(indexes) - 1 else len(
+ eval_pack['pred'][i])
+ else:
+ start_idx = indexes[j - 1] + 2 if j > 0 else 0
+ end_idx = indexes[j]
+ pred_seq = [int(eval_pack['pred'][i][k]) for k in range(start_idx, end_idx)]
+ pred_text = trainer.decode_tokens(pred_seq, self.tokenizer, vocabulary_size)
+
+ # get start and end
+ num_bins = 100 # from config
+ max_offset = num_bins - 1
+ pred_time = [
+ (int(eval_pack['pred'][i][indexes[j]])
+ - vocabulary_size) *
+ eval_pack['duration'][i] / max_offset,
+ (int(eval_pack['pred'][i][indexes[j] + 1]) -
+ vocabulary_size) *
+ eval_pack['duration'][i] / max_offset
+ ]
+
+ # if pred_time[1] <= pred_time[0]: # remove end < start
+ # continue
+ last_processed = indexes[j]
+
+ pred.append(pred_text)
+ pred_timestamps.append(pred_time)
+
+ # round to 2 decimal places
+ format_output = "[{x}s, {y}s] ".format(x=np.around(pred_time[0][0]/1000000, decimals=2), y=np.around(pred_time[1][0]/1000000, decimals=2))
+ format_output += pred_text
+ format_outputs.append(format_output)
+ print(format_outputs)
+ print("===============================================")
+ return format_outputs
+
+class ScenicCall:
+ def __init__(self, main, flags):
+ self.main = main
+ self.FLAGS = flags
+
+ def __call__(self):
+ return self.run()
+
+ def run(self):
+ # Provide access to --jax_backend_target and --jax_xla_backend flags.
+ jax.config.config_with_absl()
+ run = (functools.partial(self._run_main, main=self.main))
+ return run(self.main)
+
+ def _run_main(self, argv, *, main):
+ """Runs the `main` method after some initial setup."""
+ del argv
+ # Hide any GPUs form TensorFlow. Otherwise, TF might reserve memory and make
+ # it unavailable to JAX.
+ tf.config.experimental.set_visible_devices([], 'GPU')
+
+ # Enable wrapping of all module calls in a named_call for easier profiling:
+ nn.enable_named_call()
+
+ logging.info('JAX host: %d / %d', jax.process_index(), jax.process_count())
+ logging.info('JAX devices: %r', jax.devices())
+
+ # Add a note so that we can tell which task is which JAX host.
+ # (task 0 is not guaranteed to be the host 0)
+ platform.work_unit().set_task_status(
+ f'host_id: {jax.process_index()}, host_count: {jax.process_count()}')
+ if jax.process_index() == 0:
+ platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
+ self.FLAGS.workdir, 'Workdir')
+ self.FLAGS.config.dataset_configs.base_dir = self.FLAGS.data_dir
+ rng = jax.random.PRNGKey(self.FLAGS.config.rng_seed)
+ logging.info('RNG: %s', rng)
+
+ writer = metric_writers.create_default_writer(
+ self.FLAGS.workdir, just_logging=jax.process_index() > 0, asynchronous=True)
+
+ return main(rng=rng, config=self.FLAGS.config, workdir=self.FLAGS.workdir, writer=writer)
diff --git a/model/vision/DenseCaptioner.py b/model/vision/DenseCaptioner.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe53bfacfb1ced70bc83283cf4beafe2bc52d682
--- /dev/null
+++ b/model/vision/DenseCaptioner.py
@@ -0,0 +1,17 @@
+from model.vision.grit_src.image_dense_captions import image_caption_api
+import cv2
+
+
+class DenseCaptioner():
+
+ def __init__(self, device):
+ self.device = device
+
+ def __call__(self, imgs):
+ dense_captions = []
+ for img in imgs:
+ cv2_img = cv2.merge([img[2], img[1], img[0]]) # BGR
+ caption = image_caption_api(cv2_img, device=self.device)
+ dense_captions.append(caption)
+
+ return dense_captions
diff --git a/model/vision/GRiT2/grit_b_densecap_objectdet.pth b/model/vision/GRiT2/grit_b_densecap_objectdet.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ca279a4ea3a6e2e42781f40b97caeb50b3283ce4
--- /dev/null
+++ b/model/vision/GRiT2/grit_b_densecap_objectdet.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53b6e9b3fd948eac55b574c9b6f94ad0743dff46ba449df7ac2d33009ee92ef1
+size 417381733
diff --git a/model/vision/ImageCaptioner.py b/model/vision/ImageCaptioner.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8269e18f5c5af2f35712723c468ff6d1c75ce73
--- /dev/null
+++ b/model/vision/ImageCaptioner.py
@@ -0,0 +1,26 @@
+import torch
+from transformers import Blip2ForConditionalGeneration, Blip2Processor
+
+
+class ImageCaptioner:
+
+ def __init__(self, device='cuda'):
+ self.device = device
+ if self.device == 'cpu':
+ self.data_type = torch.float32
+ else:
+ self.data_type = torch.float16
+ self.processor = Blip2Processor.from_pretrained(
+ "/home/user/app/pretrained_models/blip2-opt-2.7b")
+ self.model = Blip2ForConditionalGeneration.from_pretrained(
+ "/home/user/app/pretrained_models/blip2-opt-2.7b",
+ torch_dtype=self.data_type, device_map="auto")
+
+ def __call__(self, imgs):
+ inputs = self.processor(
+ images=imgs, return_tensors="pt").to(self.device, self.data_type)
+ generated_ids = self.model.generate(**inputs)
+ generated_text = self.processor.batch_decode(
+ generated_ids, skip_special_tokens=True)
+
+ return generated_text
diff --git a/model/vision/Vid2SeqCaptioner.py b/model/vision/Vid2SeqCaptioner.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb3f8cadb7cd78856f58d313741c7e2ff02bc920
--- /dev/null
+++ b/model/vision/Vid2SeqCaptioner.py
@@ -0,0 +1,75 @@
+from typing import Any
+from model.utils import extract_clip_feature_single_video_fps, generate, ScenicCall, ScenicModel
+from config import vid2seq_config
+
+import torch
+
+
+import sys, os
+from pathlib import Path
+# append current path to sys.path
+sys.path.append(str(Path(__file__).parent.parent.parent / "scenic"))
+from scenic.projects.vid2seq.playground import generate as vid2seq_generate
+
+class Flag(object):
+ pass
+
+class Vid2SeqCaptioner:
+ """Vid2SeqCaptioner is a class that uses a video to generate a caption for the video.
+
+ Description:
+ It uses the Google Scenic Vid2Seq as the base model. Note that Scenic is a project designed to use with TPUs. And GPU resources(70G VMeomry at least) maybe be not enough to run the model. So, we need to use the CPU to run the model.
+ """
+ def __init__(self, config):
+ self.config = config
+ flags = Flag()
+ flags.workdir = self.config['work_dir']
+ flags.config = vid2seq_config.get_config()
+ # flags.config = self.config['config_path']
+ flags.data_dir = self.config['output_path']
+ flags.ckpt_dir = self.config['checkpoint_path']
+ self.model = ScenicModel(flags)
+
+ def __call__(self, video_path):
+ self._preprocess(video_path)
+ return self.model()
+ # call = ScenicCall(vid2seq_generate, flags)
+ # return call()
+
+ def _preprocess(self, video_path):
+ """Preprocess the video.
+
+ Description:
+ Pipeline: CLIP -> *.npy -> *.csv(video) -> generate_from_file.py -> file0000-0003.tfrecord
+ Args:
+ video_path: The path of the video.
+ """
+
+ # Extract CLIP features first
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ video_feat, video_info = extract_clip_feature_single_video_fps(
+ video_path=video_path,
+ clip_ckpt_path=self.config['clip_path'],
+ device=device
+ )
+
+ # get numpy array
+ video_feat = video_feat.cpu()
+ video_feat = video_feat.numpy()
+
+ # create a dict to store the video info
+ video_info_dict = {
+ 'basename' : 'test',
+ 'output_path' : self.config['output_path'],
+ 'asr_start' : None,
+ 'asr_end' : None,
+ 'asr_string' : None,
+ 'video_id' : video_path.split('/')[-1].split('.')[0],
+ 'features' : video_feat,
+ 'duration' : video_info['total_frames'] / video_info['avg_fps'] * 1000000,
+ }
+ # begin to generate tfrecord file
+ generate(video_info_dict)
+ print("tfrecord file generated at {}".format(self.config['output_path']))
+
+
\ No newline at end of file
diff --git a/model/vision/__init__.py b/model/vision/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cbaabff2797f2e16387cc41e4e5f91bb8468340
--- /dev/null
+++ b/model/vision/__init__.py
@@ -0,0 +1,3 @@
+from .DenseCaptioner import DenseCaptioner
+from .ImageCaptioner import ImageCaptioner
+# from .Vid2SeqCaptioner import Vid2SeqCaptioner
\ No newline at end of file
diff --git a/model/vision/grit_src/configs/Base.yaml b/model/vision/grit_src/configs/Base.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..445690acaafacfba6b54f28b4cf32e40c4bcae9d
--- /dev/null
+++ b/model/vision/grit_src/configs/Base.yaml
@@ -0,0 +1,77 @@
+MODEL:
+ META_ARCHITECTURE: "GRiT"
+ MASK_ON: True
+ PROPOSAL_GENERATOR:
+ NAME: "CenterNet"
+ FPN:
+ IN_FEATURES: ["layer3", "layer4", "layer5"]
+ PIXEL_MEAN: [123.675, 116.280, 103.530]
+ PIXEL_STD: [58.395, 57.12, 57.375]
+ ROI_HEADS:
+ NAME: GRiTROIHeadsAndTextDecoder
+ IN_FEATURES: ["p3", "p4", "p5"]
+ IOU_THRESHOLDS: [0.6]
+ NUM_CLASSES: 1
+ SCORE_THRESH_TEST: 0.02
+ NMS_THRESH_TEST: 0.5
+ OBJECT_FEAT_POOLER_RES: 14
+ ROI_BOX_CASCADE_HEAD:
+ IOUS: [0.6, 0.7, 0.8]
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_FC: 2
+ POOLER_RESOLUTION: 7
+ CLS_AGNOSTIC_BBOX_REG: True
+ MULT_PROPOSAL_SCORE: True
+ ROI_MASK_HEAD:
+ NAME: "MaskRCNNConvUpsampleHead"
+ NUM_CONV: 4
+ POOLER_RESOLUTION: 14
+ CLS_AGNOSTIC_MASK: True
+ CENTERNET:
+ NUM_CLASSES: 1
+ REG_WEIGHT: 1.
+ NOT_NORM_REG: True
+ ONLY_PROPOSAL: True
+ WITH_AGN_HM: True
+ INFERENCE_TH: 0.0001
+ PRE_NMS_TOPK_TRAIN: 4000
+ POST_NMS_TOPK_TRAIN: 2000
+ PRE_NMS_TOPK_TEST: 1000
+ POST_NMS_TOPK_TEST: 256
+ NMS_TH_TRAIN: 0.9
+ NMS_TH_TEST: 0.9
+ POS_WEIGHT: 0.5
+ NEG_WEIGHT: 0.5
+ IGNORE_HIGH_FP: 0.85
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ TEST: ("coco_2017_val",)
+DATALOADER:
+ SAMPLER_TRAIN: "MultiDatasetSampler"
+ DATASET_RATIO: [1]
+ DATASET_INPUT_SIZE: [1024]
+ DATASET_INPUT_SCALE: [[0.1, 2.0]]
+ FILTER_EMPTY_ANNOTATIONS: False
+ NUM_WORKERS: 8
+TEST:
+ DETECTIONS_PER_IMAGE: 256
+SOLVER:
+ LR_SCHEDULER_NAME: "WarmupCosineLR"
+ CHECKPOINT_PERIOD: 10000
+ WARMUP_ITERS: 1000
+ WARMUP_FACTOR: 0.001
+ USE_CUSTOM_SOLVER: True
+ OPTIMIZER: "ADAMW"
+ MAX_ITER: 180000
+ IMS_PER_BATCH: 64
+ BASE_LR: 0.00008
+ VIT_LAYER_DECAY: True
+ CLIP_GRADIENTS:
+ ENABLED: True
+INPUT:
+ FORMAT: RGB
+ CUSTOM_AUG: EfficientDetResizeCrop
+ TRAIN_SIZE: 640
+USE_ACT_CHECKPOINT: True
+VERSION: 2
\ No newline at end of file
diff --git a/model/vision/grit_src/configs/GRiT_B_DenseCap.yaml b/model/vision/grit_src/configs/GRiT_B_DenseCap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e7d2d2c7448d330d9356b3af90b975b2ce7d528
--- /dev/null
+++ b/model/vision/grit_src/configs/GRiT_B_DenseCap.yaml
@@ -0,0 +1,20 @@
+_BASE_: "Base.yaml"
+MODEL:
+ TRAIN_TASK: ["DenseCap"]
+ TEST_TASK: "DenseCap"
+ MASK_ON: False
+ ROI_HEADS:
+ SOFT_NMS_ENABLED: False
+ BEAM_SIZE: 1
+ WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_base.pth"
+ BACKBONE:
+ NAME: build_vit_fpn_backbone
+ VIT_LAYERS: 12
+SOLVER:
+ VIT_LAYER_DECAY_RATE: 0.7
+DATASETS:
+ TRAIN: ("vg_train",)
+ TEST: ("vg_test",)
+DATALOADER:
+ DATASET_BS: 2
+OUTPUT_DIR: "./output/GRiT_B_DenseCap"
\ No newline at end of file
diff --git a/model/vision/grit_src/configs/GRiT_B_DenseCap_ObjectDet.yaml b/model/vision/grit_src/configs/GRiT_B_DenseCap_ObjectDet.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..49f3ef13ab8bf0eb8515c009e70e1d33687efd39
--- /dev/null
+++ b/model/vision/grit_src/configs/GRiT_B_DenseCap_ObjectDet.yaml
@@ -0,0 +1,23 @@
+_BASE_: "Base.yaml"
+MODEL:
+ TRAIN_TASK: ["ObjectDet", "DenseCap"]
+ TEST_TASK: "DenseCap" # DenseCap or ObjectDet: Choose one for testing
+ MASK_ON: True
+ ROI_HEADS:
+ SOFT_NMS_ENABLED: False
+ BEAM_SIZE: 1
+ WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_base.pth"
+ BACKBONE:
+ NAME: build_vit_fpn_backbone
+ VIT_LAYERS: 12
+SOLVER:
+ VIT_LAYER_DECAY_RATE: 0.7
+DATASETS:
+ TRAIN: ("GRiT_coco2017_train", "vg_train")
+ TEST: ("coco_2017_test-dev",)
+DATALOADER:
+ DATASET_RATIO: [1, 1]
+ DATASET_BS: 2
+ DATASET_INPUT_SIZE: [1024, 1024]
+ DATASET_INPUT_SCALE: [[0.1, 2.0], [0.1, 2.0]]
+OUTPUT_DIR: "./output/GRiT_B_DenseCap_ObjectDet"
\ No newline at end of file
diff --git a/model/vision/grit_src/configs/GRiT_B_ObjectDet.yaml b/model/vision/grit_src/configs/GRiT_B_ObjectDet.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e7a75052f84b7913480cc5ca0e29c03e4dbea4ef
--- /dev/null
+++ b/model/vision/grit_src/configs/GRiT_B_ObjectDet.yaml
@@ -0,0 +1,20 @@
+_BASE_: "Base.yaml"
+MODEL:
+ TRAIN_TASK: ["ObjectDet"]
+ TEST_TASK: "ObjectDet"
+ MASK_ON: True
+ ROI_HEADS:
+ SOFT_NMS_ENABLED: True
+ BEAM_SIZE: 3
+ WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_base.pth"
+ BACKBONE:
+ NAME: build_vit_fpn_backbone
+ VIT_LAYERS: 12
+SOLVER:
+ VIT_LAYER_DECAY_RATE: 0.7
+DATASETS:
+ TRAIN: ("GRiT_coco2017_train",)
+ TEST: ("coco_2017_val",)
+DATALOADER:
+ DATASET_BS: 2
+OUTPUT_DIR: "./output/GRiT_B_ObjectDet"
\ No newline at end of file
diff --git a/model/vision/grit_src/configs/GRiT_H_ObjectDet.yaml b/model/vision/grit_src/configs/GRiT_H_ObjectDet.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..000a1d4629b695f5c4b4741fe28d0b8561c11cdb
--- /dev/null
+++ b/model/vision/grit_src/configs/GRiT_H_ObjectDet.yaml
@@ -0,0 +1,21 @@
+_BASE_: "Base.yaml"
+MODEL:
+ TRAIN_TASK: ["ObjectDet"]
+ TEST_TASK: "ObjectDet"
+ MASK_ON: True
+ ROI_HEADS:
+ SOFT_NMS_ENABLED: True
+ BEAM_SIZE: 3
+ WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_huge_p14to16.pth"
+ BACKBONE:
+ NAME: build_vit_fpn_backbone_huge
+ VIT_LAYERS: 32
+SOLVER:
+ MAX_ITER: 135000
+ VIT_LAYER_DECAY_RATE: 0.9
+DATASETS:
+ TRAIN: ("GRiT_coco2017_train",)
+ TEST: ("coco_2017_val",)
+DATALOADER:
+ DATASET_BS: 1
+OUTPUT_DIR: "./output/GRiT_H_ObjectDet"
\ No newline at end of file
diff --git a/model/vision/grit_src/configs/GRiT_L_ObjectDet.yaml b/model/vision/grit_src/configs/GRiT_L_ObjectDet.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b6e3b97f08fe4671e1a686b6cb6a83f8fc52f9a7
--- /dev/null
+++ b/model/vision/grit_src/configs/GRiT_L_ObjectDet.yaml
@@ -0,0 +1,20 @@
+_BASE_: "Base.yaml"
+MODEL:
+ TRAIN_TASK: ["ObjectDet"]
+ TEST_TASK: "ObjectDet"
+ MASK_ON: True
+ ROI_HEADS:
+ SOFT_NMS_ENABLED: True
+ BEAM_SIZE: 3
+ WEIGHTS: "detectron2://ImageNetPretrained/MAE/mae_pretrain_vit_large.pth"
+ BACKBONE:
+ NAME: build_vit_fpn_backbone_large
+ VIT_LAYERS: 24
+SOLVER:
+ VIT_LAYER_DECAY_RATE: 0.8
+DATASETS:
+ TRAIN: ("GRiT_coco2017_train",)
+ TEST: ("coco_2017_val",)
+DATALOADER:
+ DATASET_BS: 1
+OUTPUT_DIR: "./output/GRiT_L_ObjectDet"
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/__init__.py b/model/vision/grit_src/grit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..81f24566b0093edc133440090715b20ee569ca37
--- /dev/null
+++ b/model/vision/grit_src/grit/__init__.py
@@ -0,0 +1,7 @@
+from .modeling.meta_arch import grit
+from .modeling.roi_heads import grit_roi_heads
+from .modeling.backbone import vit
+
+from .data.datasets import object365
+from .data.datasets import vg
+from .data.datasets import grit_coco
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/config.py b/model/vision/grit_src/grit/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..fabe7f0fbe1e41c6eb280f8f7d6ae2e9c4911135
--- /dev/null
+++ b/model/vision/grit_src/grit/config.py
@@ -0,0 +1,50 @@
+from detectron2.config import CfgNode as CN
+
+
+def add_grit_config(cfg):
+ _C = cfg
+
+ _C.MODEL.BEAM_SIZE = 1
+ _C.MODEL.TRAIN_TASK = ["ObjectDet", "DenseCap"]
+ _C.MODEL.TEST_TASK = "DenseCap" # This can be varied if the model is jointly trained on multiple tasks
+
+ _C.MODEL.ROI_BOX_HEAD.USE_BIAS = 0.0 # >= 0: not use
+ _C.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE = False
+
+ _C.MODEL.ROI_HEADS.MASK_WEIGHT = 1.0
+ _C.MODEL.ROI_HEADS.OBJECT_FEAT_POOLER_RES = 14
+ _C.MODEL.ROI_HEADS.SOFT_NMS_ENABLED = False
+
+ # Backbones
+ _C.MODEL.VIT_LAYERS = 12
+
+ # Text Decoder
+ _C.TEXT_DECODER = CN()
+ _C.TEXT_DECODER.VOCAB_SIZE = 30522
+ _C.TEXT_DECODER.HIDDEN_SIZE = 768
+ _C.TEXT_DECODER.NUM_LAYERS = 6
+ _C.TEXT_DECODER.ATTENTION_HEADS = 12
+ _C.TEXT_DECODER.FEEDFORWARD_SIZE = 768 * 4
+
+ # Multi-dataset dataloader
+ _C.DATALOADER.DATASET_RATIO = [1, 1] # sample ratio
+ _C.DATALOADER.DATASET_BS = 1
+ _C.DATALOADER.DATASET_INPUT_SIZE = [1024, 1024]
+ _C.DATALOADER.DATASET_INPUT_SCALE = [(0.1, 2.0), (0.1, 2.0)]
+ _C.DATALOADER.DATASET_MIN_SIZES = [(640, 800), (640, 800)]
+ _C.DATALOADER.DATASET_MAX_SIZES = [1333, 1333]
+
+ _C.SOLVER.USE_CUSTOM_SOLVER = True
+ _C.SOLVER.OPTIMIZER = 'ADAMW'
+ _C.SOLVER.VIT_LAYER_DECAY = True
+ _C.SOLVER.VIT_LAYER_DECAY_RATE = 0.7
+
+ _C.INPUT.CUSTOM_AUG = 'EfficientDetResizeCrop'
+ _C.INPUT.TRAIN_SIZE = 1024
+ _C.INPUT.TEST_SIZE = 1024
+ _C.INPUT.SCALE_RANGE = (0.1, 2.)
+ # 'default' for fixed short / long edge
+ _C.INPUT.TEST_INPUT_TYPE = 'default'
+
+ _C.FIND_UNUSED_PARAM = True
+ _C.USE_ACT_CHECKPOINT = True
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/custom_solver.py b/model/vision/grit_src/grit/custom_solver.py
new file mode 100644
index 0000000000000000000000000000000000000000..87f7d61ed756acf9326b7ab4097a989a9e6c7532
--- /dev/null
+++ b/model/vision/grit_src/grit/custom_solver.py
@@ -0,0 +1,88 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# Modified by Jialian Wu from https://github.com/facebookresearch/Detic/blob/main/detic/custom_solver.py
+import itertools
+from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union
+import torch
+
+from detectron2.config import CfgNode
+
+from detectron2.solver.build import maybe_add_gradient_clipping
+
+
+def build_custom_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
+ params: List[Dict[str, Any]] = []
+ memo: Set[torch.nn.parameter.Parameter] = set()
+ optimizer_type = cfg.SOLVER.OPTIMIZER
+
+ for key, value in model.named_parameters(recurse=True):
+ if not value.requires_grad:
+ continue
+ # Avoid duplicating parameters
+ if value in memo:
+ continue
+ memo.add(value)
+ lr = cfg.SOLVER.BASE_LR
+ weight_decay = cfg.SOLVER.WEIGHT_DECAY
+
+ if cfg.SOLVER.VIT_LAYER_DECAY:
+ lr = lr * get_vit_lr_decay_rate(key, cfg.SOLVER.VIT_LAYER_DECAY_RATE, cfg.MODEL.VIT_LAYERS)
+
+ param = {"params": [value], "lr": lr}
+ if optimizer_type != 'ADAMW':
+ param['weight_decay'] = weight_decay
+ params += [param]
+
+ def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class
+ # detectron2 doesn't have full model gradient clipping now
+ clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
+ enable = (
+ cfg.SOLVER.CLIP_GRADIENTS.ENABLED
+ and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
+ and clip_norm_val > 0.0
+ )
+
+ class FullModelGradientClippingOptimizer(optim):
+ def step(self, closure=None):
+ all_params = itertools.chain(*[x["params"] for x in self.param_groups])
+ torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
+ super().step(closure=closure)
+
+ return FullModelGradientClippingOptimizer if enable else optim
+
+
+ if optimizer_type == 'SGD':
+ optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
+ params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM,
+ nesterov=cfg.SOLVER.NESTEROV
+ )
+ elif optimizer_type == 'ADAMW':
+ optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
+ params, cfg.SOLVER.BASE_LR,
+ weight_decay=cfg.SOLVER.WEIGHT_DECAY
+ )
+ else:
+ raise NotImplementedError(f"no optimizer type {optimizer_type}")
+ if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
+ optimizer = maybe_add_gradient_clipping(cfg, optimizer)
+ return optimizer
+
+
+def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12):
+ """
+ Calculate lr decay rate for different ViT blocks.
+ Args:
+ name (string): parameter name.
+ lr_decay_rate (float): base lr decay rate.
+ num_layers (int): number of ViT blocks.
+
+ Returns:
+ lr decay rate for the given parameter.
+ """
+ layer_id = num_layers + 1
+ if name.startswith("backbone"):
+ if ".pos_embed" in name or ".patch_embed" in name:
+ layer_id = 0
+ elif ".blocks." in name and ".residual." not in name:
+ layer_id = int(name[name.find(".blocks.") :].split(".")[2]) + 1
+
+ return lr_decay_rate ** (num_layers + 1 - layer_id)
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/data/custom_build_augmentation.py b/model/vision/grit_src/grit/data/custom_build_augmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..49a52d011c09dbe027d41ee7e50127c392a8bf33
--- /dev/null
+++ b/model/vision/grit_src/grit/data/custom_build_augmentation.py
@@ -0,0 +1,44 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from detectron2.data import transforms as T
+from .transforms.custom_augmentation_impl import EfficientDetResizeCrop
+
+
+def build_custom_augmentation(cfg, is_train, scale=None, size=None, \
+ min_size=None, max_size=None):
+ """
+ Create a list of default :class:`Augmentation` from config.
+ Now it includes resizing and flipping.
+
+ Returns:
+ list[Augmentation]
+ """
+ if cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge':
+ if is_train:
+ min_size = cfg.INPUT.MIN_SIZE_TRAIN if min_size is None else min_size
+ max_size = cfg.INPUT.MAX_SIZE_TRAIN if max_size is None else max_size
+ sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
+ else:
+ min_size = cfg.INPUT.MIN_SIZE_TEST
+ max_size = cfg.INPUT.MAX_SIZE_TEST
+ sample_style = "choice"
+ augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
+ elif cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
+ if is_train:
+ scale = cfg.INPUT.SCALE_RANGE if scale is None else scale
+ size = cfg.INPUT.TRAIN_SIZE if size is None else size
+ else:
+ scale = (1, 1)
+ size = cfg.INPUT.TEST_SIZE
+ augmentation = [EfficientDetResizeCrop(size, scale)]
+ else:
+ assert 0, cfg.INPUT.CUSTOM_AUG
+
+ if is_train:
+ augmentation.append(T.RandomFlip())
+ return augmentation
+
+
+build_custom_transform_gen = build_custom_augmentation
+"""
+Alias for backward-compatibility.
+"""
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/data/custom_dataset_dataloader.py b/model/vision/grit_src/grit/data/custom_dataset_dataloader.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea9c4172f838d130df297bed9c0755669720c39d
--- /dev/null
+++ b/model/vision/grit_src/grit/data/custom_dataset_dataloader.py
@@ -0,0 +1,250 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Modified by Jialian Wu from https://github.com/facebookresearch/Detic/blob/main/detic/data/custom_dataset_dataloader.py
+import operator
+import torch
+import torch.utils.data
+from detectron2.utils.comm import get_world_size
+
+from detectron2.config import configurable
+from torch.utils.data.sampler import BatchSampler, Sampler
+from detectron2.data.common import DatasetFromList, MapDataset
+from detectron2.data.dataset_mapper import DatasetMapper
+from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader
+from detectron2.data.samplers import TrainingSampler
+from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram
+from detectron2.data.build import filter_images_with_only_crowd_annotations
+from detectron2.data.build import filter_images_with_few_keypoints
+from detectron2.data.build import check_metadata_consistency
+from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
+from detectron2.utils import comm
+import itertools
+from typing import Optional
+
+
+def _custom_train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
+ sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
+ if 'MultiDataset' in sampler_name:
+ dataset_dicts = get_detection_dataset_dicts_with_source(
+ cfg.DATASETS.TRAIN,
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+ if cfg.MODEL.KEYPOINT_ON else 0,
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
+ )
+ else:
+ dataset_dicts = get_detection_dataset_dicts(
+ cfg.DATASETS.TRAIN,
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+ if cfg.MODEL.KEYPOINT_ON else 0,
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
+ )
+
+ if mapper is None:
+ mapper = DatasetMapper(cfg, True)
+
+ if sampler is not None:
+ pass
+ elif sampler_name == "TrainingSampler":
+ sampler = TrainingSampler(len(dataset))
+ elif sampler_name == "MultiDatasetSampler":
+ sampler = MultiDatasetSampler(
+ dataset_dicts,
+ dataset_ratio=cfg.DATALOADER.DATASET_RATIO,
+ )
+ else:
+ raise ValueError("Unknown training sampler: {}".format(sampler_name))
+
+ return {
+ "dataset": dataset_dicts,
+ "sampler": sampler,
+ "mapper": mapper,
+ "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
+ 'dataset_bs': cfg.DATALOADER.DATASET_BS,
+ 'num_datasets': len(cfg.DATASETS.TRAIN)
+ }
+
+
+@configurable(from_config=_custom_train_loader_from_config)
+def build_custom_train_loader(
+ dataset, *, mapper, sampler,
+ total_batch_size=16,
+ num_workers=0,
+ num_datasets=1,
+ dataset_bs=1
+):
+
+ if isinstance(dataset, list):
+ dataset = DatasetFromList(dataset, copy=False)
+ if mapper is not None:
+ dataset = MapDataset(dataset, mapper)
+ if sampler is None:
+ sampler = TrainingSampler(len(dataset))
+ assert isinstance(sampler, torch.utils.data.sampler.Sampler)
+
+ return build_dataset_batch_data_loader(
+ dataset_bs,
+ dataset,
+ sampler,
+ total_batch_size,
+ num_datasets=num_datasets,
+ num_workers=num_workers,
+ )
+
+
+def build_dataset_batch_data_loader(
+ dataset_bs, dataset, sampler, total_batch_size, num_datasets, num_workers=0
+):
+
+ world_size = get_world_size()
+ assert (
+ total_batch_size > 0 and total_batch_size % world_size == 0
+ ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
+ total_batch_size, world_size
+ )
+
+ data_loader = torch.utils.data.DataLoader(
+ dataset,
+ sampler=sampler,
+ num_workers=num_workers,
+ batch_sampler=None,
+ collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
+ worker_init_fn=worker_init_reset_seed,
+ )
+
+ if num_datasets > 1:
+ return MultiDatasets(data_loader, dataset_bs, num_datasets)
+ else:
+ return SingleDataset(data_loader, dataset_bs)
+
+
+def get_detection_dataset_dicts_with_source(
+ dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
+):
+ assert len(dataset_names)
+ dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
+ for dataset_name, dicts in zip(dataset_names, dataset_dicts):
+ assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
+
+ for source_id, (dataset_name, dicts) in \
+ enumerate(zip(dataset_names, dataset_dicts)):
+ assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
+ for d in dicts:
+ d['dataset_source'] = source_id
+
+ if "annotations" in dicts[0]:
+ try:
+ class_names = MetadataCatalog.get(dataset_name).thing_classes
+ check_metadata_consistency("thing_classes", dataset_name)
+ print_instances_class_histogram(dicts, class_names)
+ except AttributeError: # class names are not available for this dataset
+ pass
+
+ assert proposal_files is None
+
+ dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
+
+ has_instances = "annotations" in dataset_dicts[0]
+ if filter_empty and has_instances:
+ dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
+ if min_keypoints > 0 and has_instances:
+ dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
+
+ return dataset_dicts
+
+
+class MultiDatasetSampler(Sampler):
+ def __init__(
+ self,
+ dataset_dicts,
+ dataset_ratio,
+ seed: Optional[int] = None,
+ ):
+ sizes = [0 for _ in range(len(dataset_ratio))]
+ for d in dataset_dicts:
+ sizes[d['dataset_source']] += 1
+ print('dataset sizes', sizes)
+ self.sizes = sizes
+ assert len(dataset_ratio) == len(sizes), \
+ 'length of dataset ratio {} should be equal to number if dataset {}'.format(
+ len(dataset_ratio), len(sizes)
+ )
+ if seed is None:
+ seed = comm.shared_random_seed()
+ self._seed = int(seed)
+ self._rank = comm.get_rank()
+ self._world_size = comm.get_world_size()
+
+ self.dataset_ids = torch.tensor(
+ [d['dataset_source'] for d in dataset_dicts], dtype=torch.long)
+ self.dataset_ratio = dataset_ratio
+
+ dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \
+ for i, (r, s) in enumerate(zip(dataset_ratio, sizes))]
+ dataset_weight = torch.cat(dataset_weight)
+
+ self.weights = dataset_weight
+ self.sample_epoch_size = len(self.weights)
+
+ def __iter__(self):
+ start = self._rank
+ yield from itertools.islice(
+ self._infinite_indices(), start, None, self._world_size)
+
+ def _infinite_indices(self):
+ g = torch.Generator()
+ g.manual_seed(self._seed)
+ while True:
+ if len(self.dataset_ratio) > 1:
+ # multiple datasets
+ ids = torch.multinomial(
+ self.weights, self.sample_epoch_size, generator=g,
+ replacement=True)
+ nums = [(self.dataset_ids[ids] == i).sum().int().item() \
+ for i in range(len(self.sizes))]
+ yield from ids
+ else:
+ # single dataset
+ yield from torch.randperm(self.sizes[0], generator=g).tolist()
+
+
+class SingleDataset(torch.utils.data.IterableDataset):
+ def __init__(self, dataset, batch_sizes):
+ self.dataset = dataset
+ self.batch_sizes = batch_sizes
+ self._buckets = [[] for _ in range(2)]
+
+ def __iter__(self):
+ for d in self.dataset:
+ w, h = d["width"], d["height"]
+ aspect_ratio_bucket_id = 0 if w > h else 1
+ bucket_id = aspect_ratio_bucket_id
+ bucket = self._buckets[bucket_id]
+ bucket.append(d)
+ if len(bucket) == self.batch_sizes:
+ yield bucket[:]
+ del bucket[:]
+
+
+class MultiDatasets(torch.utils.data.IterableDataset):
+ def __init__(self, dataset, batch_sizes, num_datasets):
+ self.dataset = dataset
+ self.batch_sizes = batch_sizes
+ self._buckets = [[] for _ in range(2 * num_datasets)]
+ self.iter_idx = 0
+ self.num_datasets = num_datasets
+
+ def __iter__(self):
+ for d in self.dataset:
+ w, h = d["width"], d["height"]
+ aspect_ratio_bucket_id = 0 if w > h else 1
+ bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id
+ bucket = self._buckets[bucket_id]
+ if len(bucket) < self.batch_sizes:
+ bucket.append(d)
+ selected_dataset = self.iter_idx % self.num_datasets
+ if len(bucket) == self.batch_sizes and selected_dataset == d['dataset_source']:
+ self.iter_idx += 1
+ yield bucket[:]
+ del bucket[:]
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/data/custom_dataset_mapper.py b/model/vision/grit_src/grit/data/custom_dataset_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e21edb3d151dafdca5c4debfb7341a9ed0efdd9
--- /dev/null
+++ b/model/vision/grit_src/grit/data/custom_dataset_mapper.py
@@ -0,0 +1,149 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# Modified by Jialian Wu from https://github.com/facebookresearch/Detic/blob/main/detic/data/custom_dataset_mapper.py
+import copy
+import numpy as np
+import torch
+
+from detectron2.config import configurable
+
+from detectron2.data import detection_utils as utils
+from detectron2.data import transforms as T
+from detectron2.data.dataset_mapper import DatasetMapper
+from .custom_build_augmentation import build_custom_augmentation
+from itertools import compress
+import logging
+
+__all__ = ["CustomDatasetMapper", "ObjDescription"]
+logger = logging.getLogger(__name__)
+
+
+class CustomDatasetMapper(DatasetMapper):
+ @configurable
+ def __init__(self, is_train: bool,
+ dataset_augs=[],
+ **kwargs):
+ if is_train:
+ self.dataset_augs = [T.AugmentationList(x) for x in dataset_augs]
+ super().__init__(is_train, **kwargs)
+
+ @classmethod
+ def from_config(cls, cfg, is_train: bool = True):
+ ret = super().from_config(cfg, is_train)
+ if is_train:
+ if cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
+ dataset_scales = cfg.DATALOADER.DATASET_INPUT_SCALE
+ dataset_sizes = cfg.DATALOADER.DATASET_INPUT_SIZE
+ ret['dataset_augs'] = [
+ build_custom_augmentation(cfg, True, scale, size) \
+ for scale, size in zip(dataset_scales, dataset_sizes)]
+ else:
+ assert cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge'
+ min_sizes = cfg.DATALOADER.DATASET_MIN_SIZES
+ max_sizes = cfg.DATALOADER.DATASET_MAX_SIZES
+ ret['dataset_augs'] = [
+ build_custom_augmentation(
+ cfg, True, min_size=mi, max_size=ma) \
+ for mi, ma in zip(min_sizes, max_sizes)]
+ else:
+ ret['dataset_augs'] = []
+
+ return ret
+
+ def __call__(self, dataset_dict):
+ dataset_dict_out = self.prepare_data(dataset_dict)
+
+ # When augmented image is too small, do re-augmentation
+ retry = 0
+ while (dataset_dict_out["image"].shape[1] < 32 or dataset_dict_out["image"].shape[2] < 32):
+ retry += 1
+ if retry == 100:
+ logger.info('Retry 100 times for augmentation. Make sure the image size is not too small.')
+ logger.info('Find image information below')
+ logger.info(dataset_dict)
+ dataset_dict_out = self.prepare_data(dataset_dict)
+
+ return dataset_dict_out
+
+ def prepare_data(self, dataset_dict_in):
+ dataset_dict = copy.deepcopy(dataset_dict_in)
+ if 'file_name' in dataset_dict:
+ ori_image = utils.read_image(
+ dataset_dict["file_name"], format=self.image_format)
+ else:
+ ori_image, _, _ = self.tar_dataset[dataset_dict["tar_index"]]
+ ori_image = utils._apply_exif_orientation(ori_image)
+ ori_image = utils.convert_PIL_to_numpy(ori_image, self.image_format)
+ utils.check_image_size(dataset_dict, ori_image)
+
+ aug_input = T.AugInput(copy.deepcopy(ori_image), sem_seg=None)
+ if self.is_train:
+ transforms = \
+ self.dataset_augs[dataset_dict['dataset_source']](aug_input)
+ else:
+ transforms = self.augmentations(aug_input)
+ image, sem_seg_gt = aug_input.image, aug_input.sem_seg
+
+ image_shape = image.shape[:2]
+ dataset_dict["image"] = torch.as_tensor(
+ np.ascontiguousarray(image.transpose(2, 0, 1)))
+
+ if not self.is_train:
+ # USER: Modify this if you want to keep them for some reason.
+ dataset_dict.pop("annotations", None)
+ return dataset_dict
+
+ if "annotations" in dataset_dict:
+ if len(dataset_dict["annotations"]) > 0:
+ object_descriptions = [an['object_description'] for an in dataset_dict["annotations"]]
+ else:
+ object_descriptions = []
+ # USER: Modify this if you want to keep them for some reason.
+ for anno in dataset_dict["annotations"]:
+ if not self.use_instance_mask:
+ anno.pop("segmentation", None)
+ if not self.use_keypoint:
+ anno.pop("keypoints", None)
+
+ all_annos = [
+ (utils.transform_instance_annotations(
+ obj, transforms, image_shape,
+ keypoint_hflip_indices=self.keypoint_hflip_indices,
+ ), obj.get("iscrowd", 0))
+ for obj in dataset_dict.pop("annotations")
+ ]
+ annos = [ann[0] for ann in all_annos if ann[1] == 0]
+ instances = utils.annotations_to_instances(
+ annos, image_shape, mask_format=self.instance_mask_format
+ )
+
+ instances.gt_object_descriptions = ObjDescription(object_descriptions)
+
+ del all_annos
+ if self.recompute_boxes:
+ instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
+ dataset_dict["instances"] = utils.filter_empty_instances(instances)
+
+ return dataset_dict
+
+
+class ObjDescription:
+ def __init__(self, object_descriptions):
+ self.data = object_descriptions
+
+ def __getitem__(self, item):
+ assert type(item) == torch.Tensor
+ assert item.dim() == 1
+ if len(item) > 0:
+ assert item.dtype == torch.int64 or item.dtype == torch.bool
+ if item.dtype == torch.int64:
+ return ObjDescription([self.data[x.item()] for x in item])
+ elif item.dtype == torch.bool:
+ return ObjDescription(list(compress(self.data, item)))
+
+ return ObjDescription(list(compress(self.data, item)))
+
+ def __len__(self):
+ return len(self.data)
+
+ def __repr__(self):
+ return "ObjDescription({})".format(self.data)
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/data/datasets/grit_coco.py b/model/vision/grit_src/grit/data/datasets/grit_coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..fea81f7dd8ad2c27dac8438753b845ab64cef81e
--- /dev/null
+++ b/model/vision/grit_src/grit/data/datasets/grit_coco.py
@@ -0,0 +1,112 @@
+import logging
+import os
+from fvcore.common.timer import Timer
+from detectron2.structures import BoxMode
+from fvcore.common.file_io import PathManager
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from lvis import LVIS
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_GRiTcoco_json", "register_GRiTcoco_instances"]
+
+
+def register_GRiTcoco_instances(name, metadata, json_file, image_root):
+ """
+ """
+ DatasetCatalog.register(name, lambda: load_GRiTcoco_json(
+ json_file, image_root, name))
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root,
+ evaluator_type="coco", **metadata
+ )
+
+
+def get_GRiTcoco_meta():
+ categories = [{'supercategory': 'object', 'id': 1, 'name': 'object'}]
+ categories = sorted(categories, key=lambda x: x["id"])
+ thing_classes = [k["name"] for k in categories]
+ meta = {"thing_classes": thing_classes}
+ return meta
+
+
+def load_GRiTcoco_json(json_file, image_root, dataset_name=None):
+ '''
+ Load COCO class name text for object description for GRiT
+ '''
+
+ json_file = PathManager.get_local_path(json_file)
+
+ timer = Timer()
+ lvis_api = LVIS(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(
+ json_file, timer.seconds()))
+
+ class_names = {}
+ sort_cat = sorted(lvis_api.dataset['categories'], key=lambda x: x['id'])
+ for x in sort_cat:
+ class_names[x['id']] = x['name']
+
+ img_ids = sorted(lvis_api.imgs.keys())
+ imgs = lvis_api.load_imgs(img_ids)
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
+
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(ann_ids), \
+ "Annotation ids in '{}' are not unique".format(json_file)
+
+ imgs_anns = list(zip(imgs, anns))
+ logger.info("Loaded {} images in the LVIS v1 format from {}".format(
+ len(imgs_anns), json_file))
+
+ dataset_dicts = []
+
+ for (img_dict, anno_dict_list) in imgs_anns:
+ record = {}
+ if "file_name" in img_dict:
+ file_name = img_dict["file_name"]
+ record["file_name"] = os.path.join(image_root, file_name)
+
+ record["height"] = int(img_dict["height"])
+ record["width"] = int(img_dict["width"])
+ image_id = record["image_id"] = img_dict["id"]
+
+ objs = []
+ for anno in anno_dict_list:
+ assert anno["image_id"] == image_id
+ if anno.get('iscrowd', 0) > 0:
+ continue
+ obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
+ obj["category_id"] = 0
+ obj["object_description"] = class_names[anno['category_id']]
+ if 'segmentation' in anno:
+ segm = anno["segmentation"]
+ valid_segm = [poly for poly in segm \
+ if len(poly) % 2 == 0 and len(poly) >= 6]
+ if not len(segm) == len(valid_segm):
+ print('Annotation contains an invalid polygon with < 3 points')
+ assert len(segm) > 0
+ obj["segmentation"] = segm
+ objs.append(obj)
+ record["annotations"] = objs
+ if len(record["annotations"]) == 0:
+ continue
+ record["task"] = "ObjectDet"
+ dataset_dicts.append(record)
+
+ return dataset_dicts
+
+
+_CUSTOM_SPLITS_LVIS = {
+ "GRiT_coco2017_train": ("coco/train2017/", "coco/annotations/instances_train2017.json"),
+}
+
+
+for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items():
+ register_GRiTcoco_instances(
+ key,
+ get_GRiTcoco_meta(),
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+ os.path.join("datasets", image_root),
+ )
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/data/datasets/object365.py b/model/vision/grit_src/grit/data/datasets/object365.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b8cc19da23d8397284b50588ee46e750b5b7552
--- /dev/null
+++ b/model/vision/grit_src/grit/data/datasets/object365.py
@@ -0,0 +1,111 @@
+import logging
+import os
+from fvcore.common.timer import Timer
+from detectron2.structures import BoxMode
+from fvcore.common.file_io import PathManager
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from lvis import LVIS
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_o365_json", "register_o365_instances"]
+
+
+def register_o365_instances(name, metadata, json_file, image_root):
+ DatasetCatalog.register(name, lambda: load_o365_json(
+ json_file, image_root, name))
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root,
+ evaluator_type="lvis", **metadata
+ )
+
+
+def get_o365_meta():
+ categories = [{'supercategory': 'object', 'id': 1, 'name': 'object'}]
+ o365_categories = sorted(categories, key=lambda x: x["id"])
+ thing_classes = [k["name"] for k in o365_categories]
+ meta = {"thing_classes": thing_classes}
+ return meta
+
+
+def load_o365_json(json_file, image_root, dataset_name=None):
+ '''
+ Load Object365 class name text for object description for GRiT
+ '''
+
+ json_file = PathManager.get_local_path(json_file)
+
+ timer = Timer()
+ lvis_api = LVIS(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(
+ json_file, timer.seconds()))
+
+ class_names = {}
+ sort_cat = sorted(lvis_api.dataset['categories'], key=lambda x: x['id'])
+ for x in sort_cat:
+ if '/' in x['name']:
+ text = ''
+ for xx in x['name'].split('/'):
+ text += xx
+ text += ' '
+ text = text[:-1]
+ else:
+ text = x['name']
+ class_names[x['id']] = text
+
+ img_ids = sorted(lvis_api.imgs.keys())
+ imgs = lvis_api.load_imgs(img_ids)
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
+
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(ann_ids), \
+ "Annotation ids in '{}' are not unique".format(json_file)
+
+ imgs_anns = list(zip(imgs, anns))
+ logger.info("Loaded {} images in the LVIS v1 format from {}".format(
+ len(imgs_anns), json_file))
+
+ dataset_dicts = []
+
+ for (img_dict, anno_dict_list) in imgs_anns:
+ record = {}
+ if "file_name" in img_dict:
+ file_name = img_dict["file_name"]
+ record["file_name"] = os.path.join(image_root, file_name)
+
+ record["height"] = int(img_dict["height"])
+ record["width"] = int(img_dict["width"])
+ image_id = record["image_id"] = img_dict["id"]
+
+ objs = []
+ for anno in anno_dict_list:
+ assert anno["image_id"] == image_id
+ if anno.get('iscrowd', 0) > 0:
+ continue
+ obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
+ obj["category_id"] = 0
+ obj["object_description"] = class_names[anno['category_id']]
+
+ objs.append(obj)
+ record["annotations"] = objs
+ if len(record["annotations"]) == 0:
+ continue
+ record["task"] = "ObjectDet"
+ dataset_dicts.append(record)
+
+ return dataset_dicts
+
+
+_CUSTOM_SPLITS_LVIS = {
+ "object365_train": ("object365/images/train/", "object365/annotations/train_v1.json"),
+}
+
+
+for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items():
+ register_o365_instances(
+ key,
+ get_o365_meta(),
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+ os.path.join("datasets", image_root),
+ )
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/data/datasets/vg.py b/model/vision/grit_src/grit/data/datasets/vg.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d47a80d9f88b89ca3064dbc4945b0246162e5d1
--- /dev/null
+++ b/model/vision/grit_src/grit/data/datasets/vg.py
@@ -0,0 +1,98 @@
+import logging
+import os
+from fvcore.common.timer import Timer
+from detectron2.structures import BoxMode
+from fvcore.common.file_io import PathManager
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from lvis import LVIS
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_vg_json", "register_vg_instances"]
+
+
+def register_vg_instances(name, metadata, json_file, image_root):
+ """
+ """
+ DatasetCatalog.register(name, lambda: load_vg_json(
+ json_file, image_root, name))
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root,
+ evaluator_type="vg", **metadata
+ )
+
+
+def get_vg_meta():
+ categories = [{'supercategory': 'object', 'id': 1, 'name': 'object'}]
+ vg_categories = sorted(categories, key=lambda x: x["id"])
+ thing_classes = [k["name"] for k in vg_categories]
+ meta = {"thing_classes": thing_classes}
+ return meta
+
+
+def load_vg_json(json_file, image_root, dataset_name=None):
+
+ json_file = PathManager.get_local_path(json_file)
+
+ timer = Timer()
+ lvis_api = LVIS(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(
+ json_file, timer.seconds()))
+
+ img_ids = sorted(lvis_api.imgs.keys())
+ imgs = lvis_api.load_imgs(img_ids)
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
+
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(ann_ids), \
+ "Annotation ids in '{}' are not unique".format(json_file)
+
+ imgs_anns = list(zip(imgs, anns))
+ logger.info("Loaded {} images in the LVIS v1 format from {}".format(
+ len(imgs_anns), json_file))
+
+ dataset_dicts = []
+
+ for (img_dict, anno_dict_list) in imgs_anns:
+ record = {}
+ if "file_name" in img_dict:
+ file_name = img_dict["file_name"]
+ record["file_name"] = os.path.join(image_root, file_name)
+
+ record["height"] = int(img_dict["height"])
+ record["width"] = int(img_dict["width"])
+ image_id = record["image_id"] = img_dict["id"]
+
+ objs = []
+ for anno in anno_dict_list:
+ assert anno["image_id"] == image_id
+ if anno.get('iscrowd', 0) > 0:
+ continue
+ obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
+ obj["category_id"] = 0
+ obj["object_description"] = anno["caption"]
+
+ objs.append(obj)
+ record["annotations"] = objs
+ if len(record["annotations"]) == 0:
+ continue
+ record["task"] = "DenseCap"
+ dataset_dicts.append(record)
+
+ return dataset_dicts
+
+
+_CUSTOM_SPLITS_LVIS = {
+ "vg_train": ("vg/images", "vg/annotations/train.json"),
+ "vg_test": ("vg/images", "vg/annotations/test.json"),
+}
+
+
+for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items():
+ register_vg_instances(
+ key,
+ get_vg_meta(),
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
+ os.path.join("datasets", image_root),
+ )
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/data/transforms/custom_augmentation_impl.py b/model/vision/grit_src/grit/data/transforms/custom_augmentation_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b9637f3ad41e3ba513636219e49371296d9ab9f
--- /dev/null
+++ b/model/vision/grit_src/grit/data/transforms/custom_augmentation_impl.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# Part of the code is from https://github.com/rwightman/efficientdet-pytorch/blob/master/effdet/data/transforms.py
+# Modified by Xingyi Zhou
+# The original code is under Apache-2.0 License
+import numpy as np
+from PIL import Image
+
+from detectron2.data.transforms.augmentation import Augmentation
+from .custom_transform import EfficientDetResizeCropTransform
+
+__all__ = [
+ "EfficientDetResizeCrop",
+]
+
+
+class EfficientDetResizeCrop(Augmentation):
+ """
+ Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
+ If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
+ """
+
+ def __init__(
+ self, size, scale, interp=Image.BILINEAR
+ ):
+ """
+ """
+ super().__init__()
+ self.target_size = (size, size)
+ self.scale = scale
+ self.interp = interp
+
+ def get_transform(self, img):
+ # Select a random scale factor.
+ scale_factor = np.random.uniform(*self.scale)
+ scaled_target_height = scale_factor * self.target_size[0]
+ scaled_target_width = scale_factor * self.target_size[1]
+ # Recompute the accurate scale_factor using rounded scaled image size.
+ width, height = img.shape[1], img.shape[0]
+ img_scale_y = scaled_target_height / height
+ img_scale_x = scaled_target_width / width
+ img_scale = min(img_scale_y, img_scale_x)
+
+ # Select non-zero random offset (x, y) if scaled image is larger than target size
+ scaled_h = int(height * img_scale)
+ scaled_w = int(width * img_scale)
+ offset_y = scaled_h - self.target_size[0]
+ offset_x = scaled_w - self.target_size[1]
+ offset_y = int(max(0.0, float(offset_y)) * np.random.uniform(0, 1))
+ offset_x = int(max(0.0, float(offset_x)) * np.random.uniform(0, 1))
+ return EfficientDetResizeCropTransform(
+ scaled_h, scaled_w, offset_y, offset_x, img_scale, self.target_size, self.interp)
diff --git a/model/vision/grit_src/grit/data/transforms/custom_transform.py b/model/vision/grit_src/grit/data/transforms/custom_transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..423063a4ea14fe92caaed7efc69d8596a597485e
--- /dev/null
+++ b/model/vision/grit_src/grit/data/transforms/custom_transform.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# Part of the code is from https://github.com/rwightman/efficientdet-pytorch/blob/master/effdet/data/transforms.py
+# Modified by Xingyi Zhou
+# The original code is under Apache-2.0 License
+import numpy as np
+import torch
+import torch.nn.functional as F
+from fvcore.transforms.transform import (
+ CropTransform,
+ HFlipTransform,
+ NoOpTransform,
+ Transform,
+ TransformList,
+)
+from PIL import Image
+
+try:
+ import cv2 # noqa
+except ImportError:
+ # OpenCV is an optional dependency at the moment
+ pass
+
+__all__ = [
+ "EfficientDetResizeCropTransform",
+]
+
+
+class EfficientDetResizeCropTransform(Transform):
+ """
+ """
+
+ def __init__(self, scaled_h, scaled_w, offset_y, offset_x, img_scale, \
+ target_size, interp=None):
+ """
+ Args:
+ h, w (int): original image size
+ new_h, new_w (int): new image size
+ interp: PIL interpolation methods, defaults to bilinear.
+ """
+ # TODO decide on PIL vs opencv
+ super().__init__()
+ if interp is None:
+ interp = Image.BILINEAR
+ self._set_attributes(locals())
+
+ def apply_image(self, img, interp=None):
+ assert len(img.shape) <= 4
+
+ if img.dtype == np.uint8:
+ pil_image = Image.fromarray(img)
+ interp_method = interp if interp is not None else self.interp
+ pil_image = pil_image.resize((self.scaled_w, self.scaled_h), interp_method)
+ ret = np.asarray(pil_image)
+ right = min(self.scaled_w, self.offset_x + self.target_size[1])
+ lower = min(self.scaled_h, self.offset_y + self.target_size[0])
+ if len(ret.shape) <= 3:
+ ret = ret[self.offset_y: lower, self.offset_x: right]
+ else:
+ ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
+ else:
+ # PIL only supports uint8
+ img = torch.from_numpy(img)
+ shape = list(img.shape)
+ shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
+ img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
+ _PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"}
+ mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]
+ img = F.interpolate(img, (self.scaled_h, self.scaled_w), mode=mode, align_corners=False)
+ shape[:2] = (self.scaled_h, self.scaled_w)
+ ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
+ right = min(self.scaled_w, self.offset_x + self.target_size[1])
+ lower = min(self.scaled_h, self.offset_y + self.target_size[0])
+ if len(ret.shape) <= 3:
+ ret = ret[self.offset_y: lower, self.offset_x: right]
+ else:
+ ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
+ return ret
+
+
+ def apply_coords(self, coords):
+ coords[:, 0] = coords[:, 0] * self.img_scale
+ coords[:, 1] = coords[:, 1] * self.img_scale
+ coords[:, 0] -= self.offset_x
+ coords[:, 1] -= self.offset_y
+ return coords
+
+
+ def apply_segmentation(self, segmentation):
+ segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
+ return segmentation
+
+
+ def inverse(self):
+ raise NotImplementedError
+
+
+ def inverse_apply_coords(self, coords):
+ coords[:, 0] += self.offset_x
+ coords[:, 1] += self.offset_y
+ coords[:, 0] = coords[:, 0] / self.img_scale
+ coords[:, 1] = coords[:, 1] / self.img_scale
+ return coords
+
+
+ def inverse_apply_box(self, box: np.ndarray) -> np.ndarray:
+ """
+ """
+ idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()
+ coords = np.asarray(box).reshape(-1, 4)[:, idxs].reshape(-1, 2)
+ coords = self.inverse_apply_coords(coords).reshape((-1, 4, 2))
+ minxy = coords.min(axis=1)
+ maxxy = coords.max(axis=1)
+ trans_boxes = np.concatenate((minxy, maxxy), axis=1)
+ return trans_boxes
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/evaluation/eval.py b/model/vision/grit_src/grit/evaluation/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..951a0920ec3d93703245562d4f76ec597e672ad9
--- /dev/null
+++ b/model/vision/grit_src/grit/evaluation/eval.py
@@ -0,0 +1,156 @@
+import itertools
+import json
+import os
+from detectron2.structures import Boxes, BoxMode, pairwise_iou
+from detectron2.utils.file_io import PathManager
+import numpy as np
+import pycocotools.mask as mask_util
+from detectron2.evaluation.coco_evaluation import COCOEvaluator
+from detectron2.evaluation.coco_evaluation import _evaluate_predictions_on_coco
+
+
+class GRiTCOCOEvaluator(COCOEvaluator):
+ def process(self, inputs, outputs):
+ for input, output in zip(inputs, outputs):
+ prediction = {"image_id": input["image_id"]}
+
+ if "instances" in output:
+ instances = output["instances"].to(self._cpu_device)
+ prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
+
+ if len(prediction) > 1:
+ self._predictions.append(prediction)
+
+ def _eval_predictions(self, predictions, img_ids=None):
+ self._logger.info("Preparing results for COCO format ...")
+ coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
+ tasks = self._tasks or self._tasks_from_predictions(coco_results)
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "coco_instances_results.json")
+ self._logger.info("Saving results to {}".format(file_path))
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(coco_results))
+ f.flush()
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info(
+ "Evaluating predictions with {} COCO API...".format(
+ "unofficial" if self._use_fast_impl else "official"
+ )
+ )
+
+ coco_results = self.convert_classname_to_id(coco_results)
+
+ for task in sorted(tasks):
+ assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
+ coco_eval = (
+ _evaluate_predictions_on_coco(
+ self._coco_api,
+ coco_results,
+ task,
+ kpt_oks_sigmas=self._kpt_oks_sigmas,
+ use_fast_impl=self._use_fast_impl,
+ img_ids=img_ids,
+ max_dets_per_image=self._max_dets_per_image,
+ )
+ if len(coco_results) > 0
+ else None # cocoapi does not handle empty results very well
+ )
+
+ res = self._derive_coco_results(
+ coco_eval, task, class_names=self._metadata.get("thing_classes")
+ )
+ self._results[task] = res
+
+ def convert_classname_to_id(self, results):
+ outputs = []
+ class_name_to_id = {}
+ categories = sorted(self._coco_api.dataset['categories'], key=lambda x: x['id'])
+
+ for cat in categories:
+ class_name_to_id[cat['name']] = cat['id']
+
+ for pred in results:
+ if pred['object_descriptions'] in class_name_to_id:
+ pred['category_id'] = class_name_to_id[pred['object_descriptions']]
+ del pred['object_descriptions']
+ outputs.append(pred)
+
+ return outputs
+
+
+class GRiTVGEvaluator(COCOEvaluator):
+ def process(self, inputs, outputs):
+ for input, output in zip(inputs, outputs):
+ assert input["image_id"] == int(input['file_name'].split('/')[-1].split('.')[0])
+ prediction = {"image_id": input["image_id"]}
+
+ if "instances" in output:
+ instances = output["instances"].to(self._cpu_device)
+ prediction["instances"] = instances_to_coco_json(instances, input["image_id"], output_logits=True)
+ h = input['height']
+ w = input['width']
+ scale = 720.0 / max(h, w)
+ scaled_inst = []
+ for inst in prediction["instances"]:
+ inst['bbox'][0] = inst['bbox'][0] * scale
+ inst['bbox'][1] = inst['bbox'][1] * scale
+ inst['bbox'][2] = inst['bbox'][2] * scale
+ inst['bbox'][3] = inst['bbox'][3] * scale
+ scaled_inst.append(inst)
+ if len(scaled_inst) > 0:
+ prediction["instances"] = scaled_inst
+ if len(prediction) > 1:
+ self._predictions.append(prediction)
+
+ def _eval_predictions(self, predictions, img_ids=None):
+ '''
+ This is only for saving the results to json file
+ '''
+ self._logger.info("Preparing results for COCO format ...")
+ coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "vg_instances_results.json")
+ self._logger.info("Saving results to {}".format(file_path))
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(coco_results))
+ f.flush()
+
+
+def instances_to_coco_json(instances, img_id, output_logits=False):
+ """
+ Add object_descriptions and logit (if applicable) to
+ detectron2's instances_to_coco_json
+ """
+ num_instance = len(instances)
+ if num_instance == 0:
+ return []
+
+ boxes = instances.pred_boxes.tensor.numpy()
+ boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ boxes = boxes.tolist()
+ scores = instances.scores.tolist()
+ classes = instances.pred_classes.tolist()
+ object_descriptions = instances.pred_object_descriptions.data
+ if output_logits:
+ logits = instances.logits.tolist()
+
+ results = []
+ for k in range(num_instance):
+ result = {
+ "image_id": img_id,
+ "category_id": classes[k],
+ "bbox": boxes[k],
+ "score": scores[k],
+ 'object_descriptions': object_descriptions[k],
+ }
+ if output_logits:
+ result["logit"] = logits[k]
+
+ results.append(result)
+ return results
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/modeling/backbone/utils.py b/model/vision/grit_src/grit/modeling/backbone/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e71db21f1223c87cceeb422a70888f7bac42bb18
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/backbone/utils.py
@@ -0,0 +1,186 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# This code is from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/utils.py
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+__all__ = [
+ "window_partition",
+ "window_unpartition",
+ "add_decomposed_rel_pos",
+ "get_abs_pos",
+ "PatchEmbed",
+]
+
+def window_partition(x, window_size):
+ """
+ Partition into non-overlapping windows with padding if needed.
+ Args:
+ x (tensor): input tokens with [B, H, W, C].
+ window_size (int): window size.
+
+ Returns:
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
+ (Hp, Wp): padded height and width before partition
+ """
+ B, H, W, C = x.shape
+
+ pad_h = (window_size - H % window_size) % window_size
+ pad_w = (window_size - W % window_size) % window_size
+ if pad_h > 0 or pad_w > 0:
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
+ Hp, Wp = H + pad_h, W + pad_w
+
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+ return windows, (Hp, Wp)
+
+
+def window_unpartition(windows, window_size, pad_hw, hw):
+ """
+ Window unpartition into original sequences and removing padding.
+ Args:
+ x (tensor): input tokens with [B * num_windows, window_size, window_size, C].
+ window_size (int): window size.
+ pad_hw (Tuple): padded height and width (Hp, Wp).
+ hw (Tuple): original height and width (H, W) before padding.
+
+ Returns:
+ x: unpartitioned sequences with [B, H, W, C].
+ """
+ Hp, Wp = pad_hw
+ H, W = hw
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
+ x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
+
+ if Hp > H or Wp > W:
+ x = x[:, :H, :W, :].contiguous()
+ return x
+
+
+def get_rel_pos(q_size, k_size, rel_pos):
+ """
+ Get relative positional embeddings according to the relative positions of
+ query and key sizes.
+ Args:
+ q_size (int): size of query q.
+ k_size (int): size of key k.
+ rel_pos (Tensor): relative position embeddings (L, C).
+
+ Returns:
+ Extracted positional embeddings according to relative positions.
+ """
+ max_rel_dist = int(2 * max(q_size, k_size) - 1)
+ # Interpolate rel pos if needed.
+ if rel_pos.shape[0] != max_rel_dist:
+ # Interpolate rel pos.
+ rel_pos_resized = F.interpolate(
+ rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
+ size=max_rel_dist,
+ mode="linear",
+ )
+ rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
+ else:
+ rel_pos_resized = rel_pos
+
+ # Scale the coords with short length if shapes for q and k are different.
+ q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
+ k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
+ relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
+
+ return rel_pos_resized[relative_coords.long()]
+
+
+def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):
+ """
+ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
+ https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
+ Args:
+ attn (Tensor): attention map.
+ q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
+ rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
+ rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
+ q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
+ k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
+
+ Returns:
+ attn (Tensor): attention map with added relative positional embeddings.
+ """
+ q_h, q_w = q_size
+ k_h, k_w = k_size
+ Rh = get_rel_pos(q_h, k_h, rel_pos_h)
+ Rw = get_rel_pos(q_w, k_w, rel_pos_w)
+
+ B, _, dim = q.shape
+ r_q = q.reshape(B, q_h, q_w, dim)
+ rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
+ rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
+
+ attn = (
+ attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
+ ).view(B, q_h * q_w, k_h * k_w)
+
+ return attn
+
+
+def get_abs_pos(abs_pos, has_cls_token, hw):
+ """
+ Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token
+ dimension for the original embeddings.
+ Args:
+ abs_pos (Tensor): absolute positional embeddings with (1, num_position, C).
+ has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.
+ hw (Tuple): size of input image tokens.
+
+ Returns:
+ Absolute positional embeddings after processing with shape (1, H, W, C)
+ """
+ h, w = hw
+ if has_cls_token:
+ abs_pos = abs_pos[:, 1:]
+ xy_num = abs_pos.shape[1]
+ size = int(math.sqrt(xy_num))
+ assert size * size == xy_num
+
+ if size != h or size != w:
+ new_abs_pos = F.interpolate(
+ abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2),
+ size=(h, w),
+ mode="bicubic",
+ align_corners=False,
+ )
+
+ return new_abs_pos.permute(0, 2, 3, 1)
+ else:
+ return abs_pos.reshape(1, h, w, -1)
+
+
+class PatchEmbed(nn.Module):
+ """
+ Image to Patch Embedding.
+ """
+
+ def __init__(
+ self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768
+ ):
+ """
+ Args:
+ kernel_size (Tuple): kernel size of the projection layer.
+ stride (Tuple): stride of the projection layer.
+ padding (Tuple): padding size of the projection layer.
+ in_chans (int): Number of input image channels.
+ embed_dim (int): embed_dim (int): Patch embedding dimension.
+ """
+ super().__init__()
+
+ self.proj = nn.Conv2d(
+ in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
+ )
+
+ def forward(self, x):
+ x = self.proj(x)
+ # B C H W -> B H W C
+ x = x.permute(0, 2, 3, 1)
+ return x
diff --git a/model/vision/grit_src/grit/modeling/backbone/vit.py b/model/vision/grit_src/grit/modeling/backbone/vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..6eb3f1d96100fbe6369e99d8e4f8e2d0db273e82
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/backbone/vit.py
@@ -0,0 +1,538 @@
+# Modified by Jialian Wu from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py
+import logging
+import math
+import fvcore.nn.weight_init as weight_init
+import torch
+import torch.nn as nn
+from functools import partial
+
+from detectron2.layers import CNNBlockBase, Conv2d, get_norm
+from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
+from detectron2.layers import ShapeSpec
+from model.vision.grit_src.third_party.CenterNet2.projects.CenterNet2.centernet.modeling.backbone.fpn_p5 import LastLevelP6P7_P5
+
+import torch.utils.checkpoint as checkpoint
+from timm.models.layers import DropPath, Mlp, trunc_normal_
+
+from detectron2.modeling.backbone.backbone import Backbone
+from .utils import (
+ PatchEmbed,
+ add_decomposed_rel_pos,
+ get_abs_pos,
+ window_partition,
+ window_unpartition,
+)
+
+logger = logging.getLogger(__name__)
+
+
+__all__ = ["ViT"]
+
+
+class Attention(nn.Module):
+ """Multi-head Attention block with relative position embeddings."""
+
+ def __init__(
+ self,
+ dim,
+ num_heads=8,
+ qkv_bias=True,
+ use_rel_pos=False,
+ rel_pos_zero_init=True,
+ input_size=None,
+ ):
+ """
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads.
+ qkv_bias (bool: If True, add a learnable bias to query, key, value.
+ rel_pos (bool): If True, add relative positional embeddings to the attention map.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ input_size (int or None): Input resolution for calculating the relative positional
+ parameter size.
+ """
+ super().__init__()
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = head_dim**-0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.proj = nn.Linear(dim, dim)
+
+ self.use_rel_pos = use_rel_pos
+ if self.use_rel_pos:
+ # initialize relative positional embeddings
+ self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
+ self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
+
+ if not rel_pos_zero_init:
+ trunc_normal_(self.rel_pos_h, std=0.02)
+ trunc_normal_(self.rel_pos_w, std=0.02)
+
+ def forward(self, x):
+ B, H, W, _ = x.shape
+ # qkv with shape (3, B, nHead, H * W, C)
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+ # q, k, v with shape (B * nHead, H * W, C)
+ q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
+
+ attn = (q * self.scale) @ k.transpose(-2, -1)
+
+ if self.use_rel_pos:
+ attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
+
+ attn = attn.softmax(dim=-1)
+ x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
+ x = self.proj(x)
+
+ return x
+
+
+class ResBottleneckBlock(CNNBlockBase):
+ """
+ The standard bottleneck residual block without the last activation layer.
+ It contains 3 conv layers with kernels 1x1, 3x3, 1x1.
+ """
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ bottleneck_channels,
+ norm="LN",
+ act_layer=nn.GELU,
+ ):
+ """
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of output channels.
+ bottleneck_channels (int): number of output channels for the 3x3
+ "bottleneck" conv layers.
+ norm (str or callable): normalization for all conv layers.
+ See :func:`layers.get_norm` for supported format.
+ act_layer (callable): activation for all conv layers.
+ """
+ super().__init__(in_channels, out_channels, 1)
+
+ self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False)
+ self.norm1 = get_norm(norm, bottleneck_channels)
+ self.act1 = act_layer()
+
+ self.conv2 = Conv2d(
+ bottleneck_channels,
+ bottleneck_channels,
+ 3,
+ padding=1,
+ bias=False,
+ )
+ self.norm2 = get_norm(norm, bottleneck_channels)
+ self.act2 = act_layer()
+
+ self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False)
+ self.norm3 = get_norm(norm, out_channels)
+
+ for layer in [self.conv1, self.conv2, self.conv3]:
+ weight_init.c2_msra_fill(layer)
+ for layer in [self.norm1, self.norm2]:
+ layer.weight.data.fill_(1.0)
+ layer.bias.data.zero_()
+ # zero init last norm layer.
+ self.norm3.weight.data.zero_()
+ self.norm3.bias.data.zero_()
+
+ def forward(self, x):
+ out = x
+ for layer in self.children():
+ out = layer(out)
+
+ out = x + out
+ return out
+
+
+class Block(nn.Module):
+ """Transformer blocks with support of window attention and residual propagation blocks"""
+
+ def __init__(
+ self,
+ dim,
+ num_heads,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ drop_path=0.0,
+ norm_layer=nn.LayerNorm,
+ act_layer=nn.GELU,
+ use_rel_pos=False,
+ rel_pos_zero_init=True,
+ window_size=0,
+ use_residual_block=False,
+ input_size=None,
+ ):
+ """
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads in each ViT block.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
+ drop_path (float): Stochastic depth rate.
+ norm_layer (nn.Module): Normalization layer.
+ act_layer (nn.Module): Activation layer.
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ window_size (int): Window size for window attention blocks. If it equals 0, then not
+ use window attention.
+ use_residual_block (bool): If True, use a residual block after the MLP block.
+ input_size (int or None): Input resolution for calculating the relative positional
+ parameter size.
+ """
+ super().__init__()
+ self.norm1 = norm_layer(dim)
+ self.attn = Attention(
+ dim,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ use_rel_pos=use_rel_pos,
+ rel_pos_zero_init=rel_pos_zero_init,
+ input_size=input_size if window_size == 0 else (window_size, window_size),
+ )
+
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer)
+
+ self.window_size = window_size
+
+ self.use_residual_block = use_residual_block
+ if use_residual_block:
+ # Use a residual block with bottleneck channel as dim // 2
+ self.residual = ResBottleneckBlock(
+ in_channels=dim,
+ out_channels=dim,
+ bottleneck_channels=dim // 2,
+ norm="LN",
+ act_layer=act_layer,
+ )
+
+ def forward(self, x):
+ shortcut = x
+ x = self.norm1(x)
+ # Window partition
+ if self.window_size > 0:
+ H, W = x.shape[1], x.shape[2]
+ x, pad_hw = window_partition(x, self.window_size)
+
+ x = self.attn(x)
+ # Reverse window partition
+ if self.window_size > 0:
+ x = window_unpartition(x, self.window_size, pad_hw, (H, W))
+
+ x = shortcut + self.drop_path(x)
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+ if self.use_residual_block:
+ x = self.residual(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
+
+ return x
+
+
+class ViT(Backbone):
+ """
+ This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`.
+ "Exploring Plain Vision Transformer Backbones for Object Detection",
+ https://arxiv.org/abs/2203.16527
+ """
+
+ def __init__(
+ self,
+ img_size=1024,
+ patch_size=16,
+ in_chans=3,
+ embed_dim=768,
+ depth=12,
+ num_heads=12,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ drop_path_rate=0.0,
+ norm_layer=nn.LayerNorm,
+ act_layer=nn.GELU,
+ use_abs_pos=True,
+ use_rel_pos=False,
+ rel_pos_zero_init=True,
+ window_size=0,
+ window_block_indexes=(),
+ residual_block_indexes=(),
+ use_act_checkpoint=True,
+ pretrain_img_size=224,
+ pretrain_use_cls_token=True,
+ out_feature="last_feat",
+ ):
+ """
+ Args:
+ img_size (int): Input image size.
+ patch_size (int): Patch size.
+ in_chans (int): Number of input image channels.
+ embed_dim (int): Patch embedding dimension.
+ depth (int): Depth of ViT.
+ num_heads (int): Number of attention heads in each ViT block.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
+ drop_path_rate (float): Stochastic depth rate.
+ norm_layer (nn.Module): Normalization layer.
+ act_layer (nn.Module): Activation layer.
+ use_abs_pos (bool): If True, use absolute positional embeddings.
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ window_size (int): Window size for window attention blocks.
+ window_block_indexes (list): Indexes for blocks using window attention.
+ residual_block_indexes (list): Indexes for blocks using conv propagation.
+ use_act_checkpoint (bool): If True, use activation checkpointing.
+ pretrain_img_size (int): input image size for pretraining models.
+ pretrain_use_cls_token (bool): If True, pretrainig models use class token.
+ out_feature (str): name of the feature from the last block.
+ """
+ super().__init__()
+ self.pretrain_use_cls_token = pretrain_use_cls_token
+ self.use_act_checkpoint = use_act_checkpoint
+
+ self.patch_embed = PatchEmbed(
+ kernel_size=(patch_size, patch_size),
+ stride=(patch_size, patch_size),
+ in_chans=in_chans,
+ embed_dim=embed_dim,
+ )
+
+ if use_abs_pos:
+ # Initialize absolute positional embedding with pretrain image size.
+ num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size)
+ num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
+ else:
+ self.pos_embed = None
+
+ # stochastic depth decay rule
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
+
+ self.blocks = nn.ModuleList()
+ for i in range(depth):
+ block = Block(
+ dim=embed_dim,
+ num_heads=num_heads,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ drop_path=dpr[i],
+ norm_layer=norm_layer,
+ act_layer=act_layer,
+ use_rel_pos=use_rel_pos,
+ rel_pos_zero_init=rel_pos_zero_init,
+ window_size=window_size if i in window_block_indexes else 0,
+ use_residual_block=i in residual_block_indexes,
+ input_size=(img_size // patch_size, img_size // patch_size),
+ )
+ self.blocks.append(block)
+
+ self._out_feature_channels = {out_feature: embed_dim}
+ self._out_feature_strides = {out_feature: patch_size}
+ self._out_features = [out_feature]
+
+ if self.pos_embed is not None:
+ trunc_normal_(self.pos_embed, std=0.02)
+
+ self.apply(self._init_weights)
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=0.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ def forward(self, x):
+ x = self.patch_embed(x)
+ if self.pos_embed is not None:
+ x = x + get_abs_pos(
+ self.pos_embed, self.pretrain_use_cls_token, (x.shape[1], x.shape[2])
+ )
+
+ for blk in self.blocks:
+ if self.use_act_checkpoint:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+
+ return x.permute(0, 3, 1, 2)
+
+
+class ViT_FPN(Backbone):
+ def __init__(self, bottom_up=None, top_block=None, out_channels=None, strides=None, vit_out_dim=None):
+ super(ViT_FPN, self).__init__()
+ assert isinstance(bottom_up, Backbone)
+ self.bottom_up = bottom_up
+ self.top_block = top_block
+
+ self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
+ self._out_features = list(self._out_feature_strides.keys())
+ self._out_feature_channels = {k: out_channels for k in self._out_features}
+ self._size_divisibility = strides[2]
+
+ self.maxpool = nn.MaxPool2d(2, stride=2)
+ self.fpn_stride_16_8 = nn.ConvTranspose2d(vit_out_dim, vit_out_dim, 2, stride=2, bias=False)
+ self.fpn_stride8_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
+ self.fpn_stride8_norm1 = nn.LayerNorm(out_channels)
+ self.fpn_stride8_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
+ self.fpn_stride8_norm2 = nn.LayerNorm(out_channels)
+
+ self.fpn_stride16_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
+ self.fpn_stride16_norm1 = nn.LayerNorm(out_channels)
+ self.fpn_stride16_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
+ self.fpn_stride16_norm2 = nn.LayerNorm(out_channels)
+
+ self.fpn_stride32_conv1 = nn.Conv2d(in_channels=vit_out_dim, out_channels=out_channels, kernel_size=1, bias=False)
+ self.fpn_stride32_norm1 = nn.LayerNorm(out_channels)
+ self.fpn_stride32_conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)
+ self.fpn_stride32_norm2 = nn.LayerNorm(out_channels)
+
+ def forward(self, x):
+ vit_output_featuremap = self.bottom_up(x)
+
+ stride8_feature = self.fpn_stride_16_8(vit_output_featuremap)
+ stride8_feature = self.fpn_stride8_norm1(self.fpn_stride8_conv1(stride8_feature)
+ .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+ stride8_feature = self.fpn_stride8_norm2(self.fpn_stride8_conv2(stride8_feature)
+ .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+
+ stride32_feature = self.maxpool(vit_output_featuremap)
+ stride32_feature = self.fpn_stride32_norm1(self.fpn_stride32_conv1(stride32_feature)
+ .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+ stride32_feature = self.fpn_stride32_norm2(self.fpn_stride32_conv2(stride32_feature)
+ .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+
+ stride16_feature = self.fpn_stride16_norm1(self.fpn_stride16_conv1(vit_output_featuremap).
+ permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+ stride16_feature = self.fpn_stride16_norm2(self.fpn_stride16_conv2(stride16_feature)
+ .permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+
+ results = [stride8_feature, stride16_feature, stride32_feature]
+
+ results.extend(self.top_block(stride32_feature))
+
+ assert len(self._out_features) == len(results)
+ fpn_out = {f: res for f, res in zip(self._out_features, results)}
+
+ return fpn_out
+ @property
+ def size_divisibility(self):
+ return self._size_divisibility
+
+ def output_shape(self):
+ return {
+ name: ShapeSpec(
+ channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
+ )
+ for name in self._out_features
+ }
+
+
+@BACKBONE_REGISTRY.register()
+def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec):
+ embed_dim = 768
+ vit_out_dim = embed_dim
+ bottom_up = ViT( # Single-scale ViT backbone
+ img_size=1024,
+ patch_size=16,
+ embed_dim=embed_dim,
+ depth=12,
+ num_heads=12,
+ drop_path_rate=0.1,
+ window_size=14,
+ mlp_ratio=4,
+ qkv_bias=True,
+ norm_layer=partial(nn.LayerNorm, eps=1e-6),
+ window_block_indexes=[
+ # 2, 5, 8 11 for global attention
+ 0,
+ 1,
+ 3,
+ 4,
+ 6,
+ 7,
+ 9,
+ 10,
+ ],
+ residual_block_indexes=[],
+ use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
+ use_rel_pos=True,
+ out_feature="last_feat",)
+
+ out_channels = cfg.MODEL.FPN.OUT_CHANNELS
+ assert out_channels == 256 or out_channels == 768 or out_channels == 1024
+ backbone = ViT_FPN(bottom_up=bottom_up,
+ top_block=LastLevelP6P7_P5(out_channels, out_channels),
+ out_channels=out_channels,
+ strides=[8, 16, 32, 64, 128],
+ vit_out_dim=vit_out_dim)
+ return backbone
+
+
+@BACKBONE_REGISTRY.register()
+def build_vit_fpn_backbone_large(cfg, input_shape: ShapeSpec):
+ window_block_indexes = (list(range(0, 5)) + list(range(6, 11)) + list(range(12, 17)) + list(range(18, 23)))
+ embed_dim = 1024
+ vit_out_dim = embed_dim
+ bottom_up = ViT( # Single-scale ViT backbone
+ img_size=1024,
+ patch_size=16,
+ embed_dim=embed_dim,
+ depth=24,
+ num_heads=16,
+ drop_path_rate=0.4,
+ window_size=14,
+ mlp_ratio=4,
+ qkv_bias=True,
+ norm_layer=partial(nn.LayerNorm, eps=1e-6),
+ window_block_indexes=window_block_indexes,
+ residual_block_indexes=[],
+ use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
+ use_rel_pos=True,
+ out_feature="last_feat",)
+
+ out_channels = cfg.MODEL.FPN.OUT_CHANNELS
+ assert out_channels == 256 or out_channels == 768 or out_channels == 1024
+ backbone = ViT_FPN(bottom_up=bottom_up,
+ top_block=LastLevelP6P7_P5(out_channels, out_channels),
+ out_channels=out_channels,
+ strides=[8, 16, 32, 64, 128],
+ vit_out_dim=vit_out_dim)
+ return backbone
+
+
+@BACKBONE_REGISTRY.register()
+def build_vit_fpn_backbone_huge(cfg, input_shape: ShapeSpec):
+ window_block_indexes = (list(range(0, 7)) + list(range(8, 15)) + list(range(16, 23)) + list(range(24, 31)))
+ embed_dim = 1280
+ vit_out_dim = embed_dim
+ bottom_up = ViT( # Single-scale ViT backbone
+ img_size=1024,
+ patch_size=16,
+ embed_dim=embed_dim,
+ depth=32,
+ num_heads=16,
+ drop_path_rate=0.5,
+ window_size=14,
+ mlp_ratio=4,
+ qkv_bias=True,
+ norm_layer=partial(nn.LayerNorm, eps=1e-6),
+ window_block_indexes=window_block_indexes,
+ residual_block_indexes=[],
+ use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
+ use_rel_pos=True,
+ out_feature="last_feat",)
+
+ out_channels = cfg.MODEL.FPN.OUT_CHANNELS
+ assert out_channels == 256 or out_channels == 768 or out_channels == 1024
+ backbone = ViT_FPN(bottom_up=bottom_up,
+ top_block=LastLevelP6P7_P5(out_channels, out_channels),
+ out_channels=out_channels,
+ strides=[8, 16, 32, 64, 128],
+ vit_out_dim=vit_out_dim)
+ return backbone
diff --git a/model/vision/grit_src/grit/modeling/meta_arch/grit.py b/model/vision/grit_src/grit/modeling/meta_arch/grit.py
new file mode 100644
index 0000000000000000000000000000000000000000..101725fd455e723360eaafc26db37beb226a9233
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/meta_arch/grit.py
@@ -0,0 +1,66 @@
+from typing import Dict, List, Optional, Tuple
+import torch
+from detectron2.config import configurable
+from detectron2.structures import ImageList, Instances, Boxes
+from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
+from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
+
+
+@META_ARCH_REGISTRY.register()
+class GRiT(GeneralizedRCNN):
+ @configurable
+ def __init__(
+ self,
+ **kwargs):
+ super().__init__(**kwargs)
+ assert self.proposal_generator is not None
+
+ @classmethod
+ def from_config(cls, cfg):
+ ret = super().from_config(cfg)
+ return ret
+
+ def inference(
+ self,
+ batched_inputs: Tuple[Dict[str, torch.Tensor]],
+ detected_instances: Optional[List[Instances]] = None,
+ do_postprocess: bool = True,
+ ):
+ assert not self.training
+ assert detected_instances is None
+
+ images = self.preprocess_image(batched_inputs)
+ features = self.backbone(images.tensor)
+ proposals, _ = self.proposal_generator(images, features, None)
+ results, _ = self.roi_heads(features, proposals)
+ if do_postprocess:
+ assert not torch.jit.is_scripting(), \
+ "Scripting is not supported for postprocess."
+ return GRiT._postprocess(
+ results, batched_inputs, images.image_sizes)
+ else:
+ return results
+
+ def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
+ if not self.training:
+ return self.inference(batched_inputs)
+
+ images = self.preprocess_image(batched_inputs)
+
+ gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
+
+ targets_task = batched_inputs[0]['task']
+ for anno_per_image in batched_inputs:
+ assert targets_task == anno_per_image['task']
+
+ features = self.backbone(images.tensor)
+ proposals, proposal_losses = self.proposal_generator(
+ images, features, gt_instances)
+ proposals, roihead_textdecoder_losses = self.roi_heads(
+ features, proposals, gt_instances, targets_task=targets_task)
+
+ losses = {}
+ losses.update(roihead_textdecoder_losses)
+ losses.update(proposal_losses)
+
+ return losses
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/modeling/roi_heads/grit_fast_rcnn.py b/model/vision/grit_src/grit/modeling/roi_heads/grit_fast_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d03daabac26aecf214baf1f743c97a5d7486bf7
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/roi_heads/grit_fast_rcnn.py
@@ -0,0 +1,126 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Modified by Jialian Wu from https://github.com/facebookresearch/Detic/blob/main/detic/modeling/roi_heads/detic_fast_rcnn.py
+import torch
+from fvcore.nn import giou_loss, smooth_l1_loss
+from torch import nn
+from torch.nn import functional as F
+import fvcore.nn.weight_init as weight_init
+from detectron2.config import configurable
+from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple
+from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
+from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats
+
+
+__all__ = ["GRiTFastRCNNOutputLayers"]
+
+
+class GRiTFastRCNNOutputLayers(FastRCNNOutputLayers):
+ @configurable
+ def __init__(
+ self,
+ input_shape: ShapeSpec,
+ **kwargs,
+ ):
+ super().__init__(
+ input_shape=input_shape,
+ **kwargs,
+ )
+
+ input_size = input_shape.channels * \
+ (input_shape.width or 1) * (input_shape.height or 1)
+
+ self.bbox_pred = nn.Sequential(
+ nn.Linear(input_size, input_size),
+ nn.ReLU(inplace=True),
+ nn.Linear(input_size, 4)
+ )
+ weight_init.c2_xavier_fill(self.bbox_pred[0])
+ nn.init.normal_(self.bbox_pred[-1].weight, std=0.001)
+ nn.init.constant_(self.bbox_pred[-1].bias, 0)
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ ret = super().from_config(cfg, input_shape)
+ return ret
+
+ def losses(self, predictions, proposals):
+ scores, proposal_deltas = predictions
+ gt_classes = (
+ cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
+ )
+ num_classes = self.num_classes
+ _log_classification_stats(scores, gt_classes)
+
+ if len(proposals):
+ proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
+ assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
+ gt_boxes = cat(
+ [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
+ dim=0,
+ )
+ else:
+ proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
+
+ loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes)
+ return {
+ "loss_cls": loss_cls,
+ "loss_box_reg": self.box_reg_loss(
+ proposal_boxes, gt_boxes, proposal_deltas, gt_classes,
+ num_classes=num_classes)
+ }
+
+ def softmax_cross_entropy_loss(self, pred_class_logits, gt_classes):
+ if pred_class_logits.numel() == 0:
+ return pred_class_logits.new_zeros([1])[0]
+
+ loss = F.cross_entropy(
+ pred_class_logits, gt_classes, reduction="mean")
+ return loss
+
+ def box_reg_loss(
+ self, proposal_boxes, gt_boxes, pred_deltas, gt_classes,
+ num_classes=-1):
+ num_classes = num_classes if num_classes > 0 else self.num_classes
+ box_dim = proposal_boxes.shape[1]
+ fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < num_classes))[0]
+ if pred_deltas.shape[1] == box_dim:
+ fg_pred_deltas = pred_deltas[fg_inds]
+ else:
+ fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[
+ fg_inds, gt_classes[fg_inds]
+ ]
+
+ if self.box_reg_loss_type == "smooth_l1":
+ gt_pred_deltas = self.box2box_transform.get_deltas(
+ proposal_boxes[fg_inds],
+ gt_boxes[fg_inds],
+ )
+ loss_box_reg = smooth_l1_loss(
+ fg_pred_deltas, gt_pred_deltas, self.smooth_l1_beta, reduction="sum"
+ )
+ elif self.box_reg_loss_type == "giou":
+ fg_pred_boxes = self.box2box_transform.apply_deltas(
+ fg_pred_deltas, proposal_boxes[fg_inds]
+ )
+ loss_box_reg = giou_loss(fg_pred_boxes, gt_boxes[fg_inds], reduction="sum")
+ else:
+ raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
+ return loss_box_reg / max(gt_classes.numel(), 1.0)
+
+ def predict_probs(self, predictions, proposals):
+ scores = predictions[0]
+ num_inst_per_image = [len(p) for p in proposals]
+ probs = F.softmax(scores, dim=-1)
+ return probs.split(num_inst_per_image, dim=0)
+
+ def forward(self, x):
+ if x.dim() > 2:
+ x = torch.flatten(x, start_dim=1)
+ scores = []
+
+ cls_scores = self.cls_score(x)
+ scores.append(cls_scores)
+ scores = torch.cat(scores, dim=1)
+
+ proposal_deltas = self.bbox_pred(x)
+ return scores, proposal_deltas
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/modeling/roi_heads/grit_roi_heads.py b/model/vision/grit_src/grit/modeling/roi_heads/grit_roi_heads.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a4c5b1a9bf795aaf5096318a36af724175d72c4
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/roi_heads/grit_roi_heads.py
@@ -0,0 +1,478 @@
+import math
+import torch
+from typing import Dict, List, Optional, Tuple, Union
+
+from detectron2.config import configurable
+from detectron2.structures import Boxes, Instances, pairwise_iou
+from detectron2.utils.events import get_event_storage
+
+from detectron2.modeling.box_regression import Box2BoxTransform
+from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
+from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads, _ScaleGradient
+from detectron2.modeling.poolers import ROIPooler
+from detectron2.layers import batched_nms
+from .grit_fast_rcnn import GRiTFastRCNNOutputLayers
+
+from ..text.text_decoder import TransformerDecoderTextualHead, GRiTTextDecoder, AutoRegressiveBeamSearch
+from ..text.load_text_token import LoadTextTokens
+from transformers import BertTokenizer
+from model.vision.grit_src.grit.data.custom_dataset_mapper import ObjDescription
+from ..soft_nms import batched_soft_nms
+
+import logging
+logger = logging.getLogger(__name__)
+
+
+@ROI_HEADS_REGISTRY.register()
+class GRiTROIHeadsAndTextDecoder(CascadeROIHeads):
+ @configurable
+ def __init__(
+ self,
+ *,
+ text_decoder_transformer,
+ train_task: list,
+ test_task: str,
+ mult_proposal_score: bool = False,
+ mask_weight: float = 1.0,
+ object_feat_pooler=None,
+ soft_nms_enabled=False,
+ beam_size=1,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.mult_proposal_score = mult_proposal_score
+ self.mask_weight = mask_weight
+ self.object_feat_pooler = object_feat_pooler
+ self.soft_nms_enabled = soft_nms_enabled
+ self.test_task = test_task
+ self.beam_size = beam_size
+
+ tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
+ self.tokenizer = tokenizer
+
+ assert test_task in train_task, 'GRiT has not been trained on {} task, ' \
+ 'please verify the task name or train a new ' \
+ 'GRiT on {} task'.format(test_task, test_task)
+ task_begin_tokens = {}
+ for i, task in enumerate(train_task):
+ if i == 0:
+ task_begin_tokens[task] = tokenizer.cls_token_id
+ else:
+ task_begin_tokens[task] = 103 + i
+ self.task_begin_tokens = task_begin_tokens
+
+ beamsearch_decode = AutoRegressiveBeamSearch(
+ end_token_id=tokenizer.sep_token_id,
+ max_steps=40,
+ beam_size=beam_size,
+ objectdet=test_task == "ObjectDet",
+ per_node_beam_size=1,
+ )
+ self.text_decoder = GRiTTextDecoder(
+ text_decoder_transformer,
+ beamsearch_decode=beamsearch_decode,
+ begin_token_id=task_begin_tokens[test_task],
+ loss_type='smooth',
+ tokenizer=tokenizer,
+ )
+ self.get_target_text_tokens = LoadTextTokens(tokenizer, max_text_len=40, padding='do_not_pad')
+
+ @classmethod
+ def from_config(cls, cfg, input_shape):
+ ret = super().from_config(cfg, input_shape)
+ text_decoder_transformer = TransformerDecoderTextualHead(
+ object_feature_size=cfg.MODEL.FPN.OUT_CHANNELS,
+ vocab_size=cfg.TEXT_DECODER.VOCAB_SIZE,
+ hidden_size=cfg.TEXT_DECODER.HIDDEN_SIZE,
+ num_layers=cfg.TEXT_DECODER.NUM_LAYERS,
+ attention_heads=cfg.TEXT_DECODER.ATTENTION_HEADS,
+ feedforward_size=cfg.TEXT_DECODER.FEEDFORWARD_SIZE,
+ mask_future_positions=True,
+ padding_idx=0,
+ decoder_type='bert_en',
+ use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
+ )
+ ret.update({
+ 'text_decoder_transformer': text_decoder_transformer,
+ 'train_task': cfg.MODEL.TRAIN_TASK,
+ 'test_task': cfg.MODEL.TEST_TASK,
+ 'mult_proposal_score': cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE,
+ 'mask_weight': cfg.MODEL.ROI_HEADS.MASK_WEIGHT,
+ 'soft_nms_enabled': cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED,
+ 'beam_size': cfg.MODEL.BEAM_SIZE,
+ })
+ return ret
+
+ @classmethod
+ def _init_box_head(self, cfg, input_shape):
+ ret = super()._init_box_head(cfg, input_shape)
+ del ret['box_predictors']
+ cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
+ box_predictors = []
+ for box_head, bbox_reg_weights in zip(ret['box_heads'], \
+ cascade_bbox_reg_weights):
+ box_predictors.append(
+ GRiTFastRCNNOutputLayers(
+ cfg, box_head.output_shape,
+ box2box_transform=Box2BoxTransform(weights=bbox_reg_weights)
+ ))
+ ret['box_predictors'] = box_predictors
+
+ in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
+ pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
+ sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
+ pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
+ object_feat_pooler = ROIPooler(
+ output_size=cfg.MODEL.ROI_HEADS.OBJECT_FEAT_POOLER_RES,
+ scales=pooler_scales,
+ sampling_ratio=sampling_ratio,
+ pooler_type=pooler_type,
+ )
+ ret['object_feat_pooler'] = object_feat_pooler
+ return ret
+
+ def check_if_all_background(self, proposals, targets, stage):
+ all_background = True
+ for proposals_per_image in proposals:
+ if not (proposals_per_image.gt_classes == self.num_classes).all():
+ all_background = False
+
+ if all_background:
+ logger.info('all proposals are background at stage {}'.format(stage))
+ proposals[0].proposal_boxes.tensor[0, :] = targets[0].gt_boxes.tensor[0, :]
+ proposals[0].gt_boxes.tensor[0, :] = targets[0].gt_boxes.tensor[0, :]
+ proposals[0].objectness_logits[0] = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
+ proposals[0].gt_classes[0] = targets[0].gt_classes[0]
+ proposals[0].gt_object_descriptions.data[0] = targets[0].gt_object_descriptions.data[0]
+ if 'foreground' in proposals[0].get_fields().keys():
+ proposals[0].foreground[0] = 1
+ return proposals
+
+ def _forward_box(self, features, proposals, targets=None, task="ObjectDet"):
+ if self.training:
+ proposals = self.check_if_all_background(proposals, targets, 0)
+ if (not self.training) and self.mult_proposal_score:
+ if len(proposals) > 0 and proposals[0].has('scores'):
+ proposal_scores = [p.get('scores') for p in proposals]
+ else:
+ proposal_scores = [p.get('objectness_logits') for p in proposals]
+
+ features = [features[f] for f in self.box_in_features]
+ head_outputs = []
+ prev_pred_boxes = None
+ image_sizes = [x.image_size for x in proposals]
+
+ for k in range(self.num_cascade_stages):
+ if k > 0:
+ proposals = self._create_proposals_from_boxes(
+ prev_pred_boxes, image_sizes,
+ logits=[p.objectness_logits for p in proposals])
+ if self.training:
+ proposals = self._match_and_label_boxes_GRiT(
+ proposals, k, targets)
+ proposals = self.check_if_all_background(proposals, targets, k)
+ predictions = self._run_stage(features, proposals, k)
+ prev_pred_boxes = self.box_predictor[k].predict_boxes(
+ (predictions[0], predictions[1]), proposals)
+ head_outputs.append((self.box_predictor[k], predictions, proposals))
+
+ if self.training:
+ object_features = self.object_feat_pooler(features, [x.proposal_boxes for x in proposals])
+ object_features = _ScaleGradient.apply(object_features, 1.0 / self.num_cascade_stages)
+ foreground = torch.cat([x.foreground for x in proposals])
+ object_features = object_features[foreground > 0]
+
+ object_descriptions = []
+ for x in proposals:
+ object_descriptions += x.gt_object_descriptions[x.foreground > 0].data
+ object_descriptions = ObjDescription(object_descriptions)
+ object_descriptions = object_descriptions.data
+
+ if len(object_descriptions) > 0:
+ begin_token = self.task_begin_tokens[task]
+ text_decoder_inputs = self.get_target_text_tokens(object_descriptions, object_features, begin_token)
+ object_features = object_features.view(
+ object_features.shape[0], object_features.shape[1], -1).permute(0, 2, 1).contiguous()
+ text_decoder_inputs.update({'object_features': object_features})
+ text_decoder_loss = self.text_decoder(text_decoder_inputs)
+ else:
+ text_decoder_loss = head_outputs[0][1][0].new_zeros([1])[0]
+
+ losses = {}
+ storage = get_event_storage()
+ # RoI Head losses (For the proposal generator loss, please find it in grit.py)
+ for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
+ with storage.name_scope("stage{}".format(stage)):
+ stage_losses = predictor.losses(
+ (predictions[0], predictions[1]), proposals)
+ losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
+ # Text Decoder loss
+ losses.update({'text_decoder_loss': text_decoder_loss})
+ return losses
+ else:
+ scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
+ logits_per_stage = [(h[1][0],) for h in head_outputs]
+ scores = [
+ sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
+ for scores_per_image in zip(*scores_per_stage)
+ ]
+ logits = [
+ sum(list(logits_per_image)) * (1.0 / self.num_cascade_stages)
+ for logits_per_image in zip(*logits_per_stage)
+ ]
+ if self.mult_proposal_score:
+ scores = [(s * ps[:, None]) ** 0.5 for s, ps in zip(scores, proposal_scores)]
+ predictor, predictions, proposals = head_outputs[-1]
+ boxes = predictor.predict_boxes(
+ (predictions[0], predictions[1]), proposals)
+ assert len(boxes) == 1
+ pred_instances, _ = self.fast_rcnn_inference_GRiT(
+ boxes,
+ scores,
+ logits,
+ image_sizes,
+ predictor.test_score_thresh,
+ predictor.test_nms_thresh,
+ predictor.test_topk_per_image,
+ self.soft_nms_enabled,
+ )
+
+ assert len(pred_instances) == 1, "Only support one image"
+ for i, pred_instance in enumerate(pred_instances):
+ if len(pred_instance.pred_boxes) > 0:
+ object_features = self.object_feat_pooler(features, [pred_instance.pred_boxes])
+ object_features = object_features.view(
+ object_features.shape[0], object_features.shape[1], -1).permute(0, 2, 1).contiguous()
+ text_decoder_output = self.text_decoder({'object_features': object_features})
+ if self.beam_size > 1 and self.test_task == "ObjectDet":
+ pred_boxes = []
+ pred_scores = []
+ pred_classes = []
+ pred_object_descriptions = []
+
+ for beam_id in range(self.beam_size):
+ pred_boxes.append(pred_instance.pred_boxes.tensor)
+ # object score = sqrt(objectness score x description score)
+ pred_scores.append((pred_instance.scores *
+ torch.exp(text_decoder_output['logprobs'])[:, beam_id]) ** 0.5)
+ pred_classes.append(pred_instance.pred_classes)
+ for prediction in text_decoder_output['predictions'][:, beam_id, :]:
+ # convert text tokens to words
+ description = self.tokenizer.decode(prediction.tolist()[1:], skip_special_tokens=True)
+ pred_object_descriptions.append(description)
+
+ merged_instances = Instances(image_sizes[0])
+ if torch.cat(pred_scores, dim=0).shape[0] <= predictor.test_topk_per_image:
+ merged_instances.scores = torch.cat(pred_scores, dim=0)
+ merged_instances.pred_boxes = Boxes(torch.cat(pred_boxes, dim=0))
+ merged_instances.pred_classes = torch.cat(pred_classes, dim=0)
+ merged_instances.pred_object_descriptions = ObjDescription(pred_object_descriptions)
+ else:
+ pred_scores, top_idx = torch.topk(
+ torch.cat(pred_scores, dim=0), predictor.test_topk_per_image)
+ merged_instances.scores = pred_scores
+ merged_instances.pred_boxes = Boxes(torch.cat(pred_boxes, dim=0)[top_idx, :])
+ merged_instances.pred_classes = torch.cat(pred_classes, dim=0)[top_idx]
+ merged_instances.pred_object_descriptions = \
+ ObjDescription(ObjDescription(pred_object_descriptions)[top_idx].data)
+
+ pred_instances[i] = merged_instances
+ else:
+ # object score = sqrt(objectness score x description score)
+ pred_instance.scores = (pred_instance.scores *
+ torch.exp(text_decoder_output['logprobs'])) ** 0.5
+
+ pred_object_descriptions = []
+ for prediction in text_decoder_output['predictions']:
+ # convert text tokens to words
+ description = self.tokenizer.decode(prediction.tolist()[1:], skip_special_tokens=True)
+ pred_object_descriptions.append(description)
+ pred_instance.pred_object_descriptions = ObjDescription(pred_object_descriptions)
+ else:
+ pred_instance.pred_object_descriptions = ObjDescription([])
+
+ return pred_instances
+
+
+ def forward(self, features, proposals, targets=None, targets_task="ObjectDet"):
+ if self.training:
+ proposals = self.label_and_sample_proposals(
+ proposals, targets)
+
+ losses = self._forward_box(features, proposals, targets, task=targets_task)
+ if targets[0].has('gt_masks'):
+ mask_losses = self._forward_mask(features, proposals)
+ losses.update({k: v * self.mask_weight \
+ for k, v in mask_losses.items()})
+ else:
+ losses.update(self._get_empty_mask_loss(device=proposals[0].objectness_logits.device))
+ return proposals, losses
+ else:
+ pred_instances = self._forward_box(features, proposals, task=self.test_task)
+ pred_instances = self.forward_with_given_boxes(features, pred_instances)
+ return pred_instances, {}
+
+ @torch.no_grad()
+ def _match_and_label_boxes_GRiT(self, proposals, stage, targets):
+ """
+ Add "gt_object_description" and "foreground" to detectron2's _match_and_label_boxes
+ """
+ num_fg_samples, num_bg_samples = [], []
+ for proposals_per_image, targets_per_image in zip(proposals, targets):
+ match_quality_matrix = pairwise_iou(
+ targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
+ )
+ # proposal_labels are 0 or 1
+ matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix)
+ if len(targets_per_image) > 0:
+ gt_classes = targets_per_image.gt_classes[matched_idxs]
+ # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
+ gt_classes[proposal_labels == 0] = self.num_classes
+ foreground = torch.ones_like(gt_classes)
+ foreground[proposal_labels == 0] = 0
+ gt_boxes = targets_per_image.gt_boxes[matched_idxs]
+ gt_object_descriptions = targets_per_image.gt_object_descriptions[matched_idxs]
+ else:
+ gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
+ foreground = torch.zeros_like(gt_classes)
+ gt_boxes = Boxes(
+ targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4))
+ )
+ gt_object_descriptions = ObjDescription(['None' for i in range(len(proposals_per_image))])
+ proposals_per_image.gt_classes = gt_classes
+ proposals_per_image.gt_boxes = gt_boxes
+ proposals_per_image.gt_object_descriptions = gt_object_descriptions
+ proposals_per_image.foreground = foreground
+
+ num_fg_samples.append((proposal_labels == 1).sum().item())
+ num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1])
+
+ # Log the number of fg/bg samples in each stage
+ storage = get_event_storage()
+ storage.put_scalar(
+ "stage{}/roi_head/num_fg_samples".format(stage),
+ sum(num_fg_samples) / len(num_fg_samples),
+ )
+ storage.put_scalar(
+ "stage{}/roi_head/num_bg_samples".format(stage),
+ sum(num_bg_samples) / len(num_bg_samples),
+ )
+ return proposals
+
+ def fast_rcnn_inference_GRiT(
+ self,
+ boxes: List[torch.Tensor],
+ scores: List[torch.Tensor],
+ logits: List[torch.Tensor],
+ image_shapes: List[Tuple[int, int]],
+ score_thresh: float,
+ nms_thresh: float,
+ topk_per_image: int,
+ soft_nms_enabled: bool,
+ ):
+ result_per_image = [
+ self.fast_rcnn_inference_single_image_GRiT(
+ boxes_per_image, scores_per_image, logits_per_image, image_shape,
+ score_thresh, nms_thresh, topk_per_image, soft_nms_enabled
+ )
+ for scores_per_image, boxes_per_image, image_shape, logits_per_image \
+ in zip(scores, boxes, image_shapes, logits)
+ ]
+ return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
+
+ def fast_rcnn_inference_single_image_GRiT(
+ self,
+ boxes,
+ scores,
+ logits,
+ image_shape: Tuple[int, int],
+ score_thresh: float,
+ nms_thresh: float,
+ topk_per_image: int,
+ soft_nms_enabled,
+ ):
+ """
+ Add soft NMS to detectron2's fast_rcnn_inference_single_image
+ """
+ valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
+ if not valid_mask.all():
+ boxes = boxes[valid_mask]
+ scores = scores[valid_mask]
+ logits = logits[valid_mask]
+
+ scores = scores[:, :-1]
+ logits = logits[:, :-1]
+ num_bbox_reg_classes = boxes.shape[1] // 4
+ # Convert to Boxes to use the `clip` function ...
+ boxes = Boxes(boxes.reshape(-1, 4))
+ boxes.clip(image_shape)
+ boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4
+
+ # 1. Filter results based on detection scores. It can make NMS more efficient
+ # by filtering out low-confidence detections.
+ filter_mask = scores > score_thresh # R x K
+ # R' x 2. First column contains indices of the R predictions;
+ # Second column contains indices of classes.
+ filter_inds = filter_mask.nonzero()
+ if num_bbox_reg_classes == 1:
+ boxes = boxes[filter_inds[:, 0], 0]
+ else:
+ boxes = boxes[filter_mask]
+ scores = scores[filter_mask]
+ logits = logits[filter_mask]
+
+ # 2. Apply NMS for each class independently.
+ if not soft_nms_enabled:
+ keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
+ else:
+ keep, soft_nms_scores = batched_soft_nms(
+ boxes,
+ scores,
+ filter_inds[:, 1],
+ "linear",
+ 0.5,
+ nms_thresh,
+ 0.001,
+ )
+ scores[keep] = soft_nms_scores
+ if topk_per_image >= 0:
+ keep = keep[:topk_per_image]
+ boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
+ logits = logits[keep]
+
+ result = Instances(image_shape)
+ result.pred_boxes = Boxes(boxes)
+ result.scores = scores
+ result.pred_classes = filter_inds[:, 1]
+ result.logits = logits
+ return result, filter_inds[:, 0]
+
+ def _get_empty_mask_loss(self, device):
+ if self.mask_on:
+ return {'loss_mask': torch.zeros(
+ (1, ), device=device, dtype=torch.float32)[0]}
+ else:
+ return {}
+
+ def _create_proposals_from_boxes(self, boxes, image_sizes, logits):
+ boxes = [Boxes(b.detach()) for b in boxes]
+ proposals = []
+ for boxes_per_image, image_size, logit in zip(
+ boxes, image_sizes, logits):
+ boxes_per_image.clip(image_size)
+ if self.training:
+ inds = boxes_per_image.nonempty()
+ boxes_per_image = boxes_per_image[inds]
+ logit = logit[inds]
+ prop = Instances(image_size)
+ prop.proposal_boxes = boxes_per_image
+ prop.objectness_logits = logit
+ proposals.append(prop)
+ return proposals
+
+ def _run_stage(self, features, proposals, stage):
+ pool_boxes = [x.proposal_boxes for x in proposals]
+ box_features = self.box_pooler(features, pool_boxes)
+ box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)
+ box_features = self.box_head[stage](box_features)
+ return self.box_predictor[stage](box_features)
diff --git a/model/vision/grit_src/grit/modeling/soft_nms.py b/model/vision/grit_src/grit/modeling/soft_nms.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a5aae7c4261191b8e07e0fd25055d8917f7f97d
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/soft_nms.py
@@ -0,0 +1,177 @@
+import torch
+
+from detectron2.structures import Boxes, RotatedBoxes, pairwise_iou, pairwise_iou_rotated
+
+
+def soft_nms(boxes, scores, method, gaussian_sigma, linear_threshold, prune_threshold):
+ """
+ Performs soft non-maximum suppression algorithm on axis aligned boxes
+
+ Args:
+ boxes (Tensor[N, 5]):
+ boxes where NMS will be performed. They
+ are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format
+ scores (Tensor[N]):
+ scores for each one of the boxes
+ method (str):
+ one of ['gaussian', 'linear', 'hard']
+ see paper for details. users encouraged not to use "hard", as this is the
+ same nms available elsewhere in detectron2
+ gaussian_sigma (float):
+ parameter for Gaussian penalty function
+ linear_threshold (float):
+ iou threshold for applying linear decay. Nt from the paper
+ re-used as threshold for standard "hard" nms
+ prune_threshold (float):
+ boxes with scores below this threshold are pruned at each iteration.
+ Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
+
+ Returns:
+ tuple(Tensor, Tensor):
+ [0]: int64 tensor with the indices of the elements that have been kept
+ by Soft NMS, sorted in decreasing order of scores
+ [1]: float tensor with the re-scored scores of the elements that were kept
+"""
+ return _soft_nms(
+ Boxes,
+ pairwise_iou,
+ boxes,
+ scores,
+ method,
+ gaussian_sigma,
+ linear_threshold,
+ prune_threshold,
+ )
+
+
+def batched_soft_nms(
+ boxes, scores, idxs, method, gaussian_sigma, linear_threshold, prune_threshold
+):
+ """
+ Performs soft non-maximum suppression in a batched fashion.
+
+ Each index value correspond to a category, and NMS
+ will not be applied between elements of different categories.
+
+ Args:
+ boxes (Tensor[N, 4]):
+ boxes where NMS will be performed. They
+ are expected to be in (x1, y1, x2, y2) format
+ scores (Tensor[N]):
+ scores for each one of the boxes
+ idxs (Tensor[N]):
+ indices of the categories for each one of the boxes.
+ method (str):
+ one of ['gaussian', 'linear', 'hard']
+ see paper for details. users encouraged not to use "hard", as this is the
+ same nms available elsewhere in detectron2
+ gaussian_sigma (float):
+ parameter for Gaussian penalty function
+ linear_threshold (float):
+ iou threshold for applying linear decay. Nt from the paper
+ re-used as threshold for standard "hard" nms
+ prune_threshold (float):
+ boxes with scores below this threshold are pruned at each iteration.
+ Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
+ Returns:
+ tuple(Tensor, Tensor):
+ [0]: int64 tensor with the indices of the elements that have been kept
+ by Soft NMS, sorted in decreasing order of scores
+ [1]: float tensor with the re-scored scores of the elements that were kept
+ """
+ if boxes.numel() == 0:
+ return (
+ torch.empty((0,), dtype=torch.int64, device=boxes.device),
+ torch.empty((0,), dtype=torch.float32, device=scores.device),
+ )
+ # strategy: in order to perform NMS independently per class.
+ # we add an offset to all the boxes. The offset is dependent
+ # only on the class idx, and is large enough so that boxes
+ # from different classes do not overlap
+ max_coordinate = boxes.max()
+ offsets = idxs.to(boxes) * (max_coordinate + 1)
+ boxes_for_nms = boxes + offsets[:, None]
+ return soft_nms(
+ boxes_for_nms, scores, method, gaussian_sigma, linear_threshold, prune_threshold
+ )
+
+
+def _soft_nms(
+ box_class,
+ pairwise_iou_func,
+ boxes,
+ scores,
+ method,
+ gaussian_sigma,
+ linear_threshold,
+ prune_threshold,
+):
+ """
+ Soft non-max suppression algorithm.
+
+ Implementation of [Soft-NMS -- Improving Object Detection With One Line of Codec]
+ (https://arxiv.org/abs/1704.04503)
+
+ Args:
+ box_class (cls): one of Box, RotatedBoxes
+ pairwise_iou_func (func): one of pairwise_iou, pairwise_iou_rotated
+ boxes (Tensor[N, ?]):
+ boxes where NMS will be performed
+ if Boxes, in (x1, y1, x2, y2) format
+ if RotatedBoxes, in (x_ctr, y_ctr, width, height, angle_degrees) format
+ scores (Tensor[N]):
+ scores for each one of the boxes
+ method (str):
+ one of ['gaussian', 'linear', 'hard']
+ see paper for details. users encouraged not to use "hard", as this is the
+ same nms available elsewhere in detectron2
+ gaussian_sigma (float):
+ parameter for Gaussian penalty function
+ linear_threshold (float):
+ iou threshold for applying linear decay. Nt from the paper
+ re-used as threshold for standard "hard" nms
+ prune_threshold (float):
+ boxes with scores below this threshold are pruned at each iteration.
+ Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
+
+ Returns:
+ tuple(Tensor, Tensor):
+ [0]: int64 tensor with the indices of the elements that have been kept
+ by Soft NMS, sorted in decreasing order of scores
+ [1]: float tensor with the re-scored scores of the elements that were kept
+ """
+ boxes = boxes.clone()
+ scores = scores.clone()
+ idxs = torch.arange(scores.size()[0])
+
+ idxs_out = []
+ scores_out = []
+
+ while scores.numel() > 0:
+ top_idx = torch.argmax(scores)
+ idxs_out.append(idxs[top_idx].item())
+ scores_out.append(scores[top_idx].item())
+
+ top_box = boxes[top_idx]
+ ious = pairwise_iou_func(box_class(top_box.unsqueeze(0)), box_class(boxes))[0]
+
+ if method == "linear":
+ decay = torch.ones_like(ious)
+ decay_mask = ious > linear_threshold
+ decay[decay_mask] = 1 - ious[decay_mask]
+ elif method == "gaussian":
+ decay = torch.exp(-torch.pow(ious, 2) / gaussian_sigma)
+ elif method == "hard": # standard NMS
+ decay = (ious < linear_threshold).float()
+ else:
+ raise NotImplementedError("{} soft nms method not implemented.".format(method))
+
+ scores *= decay
+ keep = scores > prune_threshold
+ keep[top_idx] = False
+
+ boxes = boxes[keep]
+ scores = scores[keep]
+ idxs = idxs[keep]
+
+ return torch.tensor(idxs_out).to(boxes.device), torch.tensor(scores_out).to(scores.device)
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/modeling/text/file_utils.py b/model/vision/grit_src/grit/modeling/text/file_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..51918cf3857471e4ffb5b617d73ee8b9eed0989e
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/text/file_utils.py
@@ -0,0 +1,256 @@
+# Utilities for working with the local dataset cache.
+# This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
+# Copyright by the AllenNLP authors.
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import sys
+import json
+import logging
+import os
+import shutil
+import tempfile
+import fnmatch
+from functools import wraps
+from hashlib import sha256
+from io import open
+
+import boto3
+import requests
+from botocore.exceptions import ClientError
+from tqdm import tqdm
+
+try:
+ from torch.hub import _get_torch_home
+ torch_cache_home = _get_torch_home()
+except ImportError:
+ torch_cache_home = os.path.expanduser(
+ os.getenv('TORCH_HOME', os.path.join(
+ os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
+default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+try:
+ from pathlib import Path
+ PYTORCH_PRETRAINED_BERT_CACHE = Path(
+ os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))
+except (AttributeError, ImportError):
+ PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
+ default_cache_path)
+
+logger = logging.getLogger(__name__) # pylint: disable=invalid-name
+
+
+def url_to_filename(url, etag=None):
+ """
+ Convert `url` into a hashed filename in a repeatable way.
+ If `etag` is specified, append its hash to the url's, delimited
+ by a period.
+ """
+ url_bytes = url.encode('utf-8')
+ url_hash = sha256(url_bytes)
+ filename = url_hash.hexdigest()
+
+ if etag:
+ etag_bytes = etag.encode('utf-8')
+ etag_hash = sha256(etag_bytes)
+ filename += '.' + etag_hash.hexdigest()
+
+ return filename
+
+
+def filename_to_url(filename, cache_dir=None):
+ """
+ Return the url and etag (which may be ``None``) stored for `filename`.
+ Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
+ """
+ if cache_dir is None:
+ cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
+ if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+
+ cache_path = os.path.join(cache_dir, filename)
+ if not os.path.exists(cache_path):
+ raise EnvironmentError("file {} not found".format(cache_path))
+
+ meta_path = cache_path + '.json'
+ if not os.path.exists(meta_path):
+ raise EnvironmentError("file {} not found".format(meta_path))
+
+ with open(meta_path, encoding="utf-8") as meta_file:
+ metadata = json.load(meta_file)
+ url = metadata['url']
+ etag = metadata['etag']
+
+ return url, etag
+
+
+def cached_path(url_or_filename, cache_dir=None):
+ """
+ Given something that might be a URL (or might be a local path),
+ determine which. If it's a URL, download the file and cache it, and
+ return the path to the cached file. If it's already a local path,
+ make sure the file exists and then return the path.
+ """
+ if cache_dir is None:
+ cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
+ if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
+ url_or_filename = str(url_or_filename)
+ if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+
+ parsed = urlparse(url_or_filename)
+
+ if parsed.scheme in ('http', 'https', 's3'):
+ # URL, so get it from the cache (downloading if necessary)
+ return get_from_cache(url_or_filename, cache_dir)
+ elif os.path.exists(url_or_filename):
+ # File, and it exists.
+ return url_or_filename
+ elif parsed.scheme == '':
+ # File, but it doesn't exist.
+ raise EnvironmentError("file {} not found".format(url_or_filename))
+ else:
+ # Something unknown
+ raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
+
+
+def split_s3_path(url):
+ """Split a full s3 path into the bucket name and path."""
+ parsed = urlparse(url)
+ if not parsed.netloc or not parsed.path:
+ raise ValueError("bad s3 path {}".format(url))
+ bucket_name = parsed.netloc
+ s3_path = parsed.path
+ # Remove '/' at beginning of path.
+ if s3_path.startswith("/"):
+ s3_path = s3_path[1:]
+ return bucket_name, s3_path
+
+
+def s3_request(func):
+ """
+ Wrapper function for s3 requests in order to create more helpful error
+ messages.
+ """
+
+ @wraps(func)
+ def wrapper(url, *args, **kwargs):
+ try:
+ return func(url, *args, **kwargs)
+ except ClientError as exc:
+ if int(exc.response["Error"]["Code"]) == 404:
+ raise EnvironmentError("file {} not found".format(url))
+ else:
+ raise
+
+ return wrapper
+
+
+@s3_request
+def s3_etag(url):
+ """Check ETag on S3 object."""
+ s3_resource = boto3.resource("s3")
+ bucket_name, s3_path = split_s3_path(url)
+ s3_object = s3_resource.Object(bucket_name, s3_path)
+ return s3_object.e_tag
+
+
+@s3_request
+def s3_get(url, temp_file):
+ """Pull a file directly from S3."""
+ s3_resource = boto3.resource("s3")
+ bucket_name, s3_path = split_s3_path(url)
+ s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
+
+
+def http_get(url, temp_file):
+ req = requests.get(url, stream=True)
+ content_length = req.headers.get('Content-Length')
+ total = int(content_length) if content_length is not None else None
+ progress = tqdm(unit="B", total=total)
+ for chunk in req.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
+ progress.update(len(chunk))
+ temp_file.write(chunk)
+ progress.close()
+
+
+def get_from_cache(url, cache_dir=None):
+ """
+ Given a URL, look for the corresponding dataset in the local cache.
+ If it's not there, download it. Then return the path to the cached file.
+ """
+ if cache_dir is None:
+ cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
+ if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+ if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
+ cache_dir = str(cache_dir)
+
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+
+ # Get eTag to add to filename, if it exists.
+ if url.startswith("s3://"):
+ etag = s3_etag(url)
+ else:
+ try:
+ response = requests.head(url, allow_redirects=True)
+ if response.status_code != 200:
+ etag = None
+ else:
+ etag = response.headers.get("ETag")
+ except EnvironmentError:
+ etag = None
+
+ if sys.version_info[0] == 2 and etag is not None:
+ etag = etag.decode('utf-8')
+ filename = url_to_filename(url, etag)
+
+ # get cache path to put the file
+ cache_path = os.path.join(cache_dir, filename)
+
+ # If we don't have a connection (etag is None) and can't identify the file
+ # try to get the last downloaded one
+ if not os.path.exists(cache_path) and etag is None:
+ matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
+ matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
+ if matching_files:
+ cache_path = os.path.join(cache_dir, matching_files[-1])
+
+ if not os.path.exists(cache_path):
+ # Download to temporary file, then copy to cache dir once finished.
+ # Otherwise you get corrupt cache entries if the download gets interrupted.
+ with tempfile.NamedTemporaryFile() as temp_file:
+ logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
+
+ # GET file object
+ if url.startswith("s3://"):
+ s3_get(url, temp_file)
+ else:
+ http_get(url, temp_file)
+
+ # we are copying the file before closing it, so flush to avoid truncation
+ temp_file.flush()
+ # shutil.copyfileobj() starts at the current position, so go to the start
+ temp_file.seek(0)
+
+ logger.info("copying %s to cache at %s", temp_file.name, cache_path)
+ with open(cache_path, 'wb') as cache_file:
+ shutil.copyfileobj(temp_file, cache_file)
+
+ logger.info("creating metadata file for %s", cache_path)
+ meta = {'url': url, 'etag': etag}
+ meta_path = cache_path + '.json'
+ with open(meta_path, 'w') as meta_file:
+ output_string = json.dumps(meta)
+ meta_file.write(output_string)
+
+ logger.info("removing temp file %s", temp_file.name)
+
+ return cache_path
diff --git a/model/vision/grit_src/grit/modeling/text/load_text_token.py b/model/vision/grit_src/grit/modeling/text/load_text_token.py
new file mode 100644
index 0000000000000000000000000000000000000000..8491021bf5d7d23d7f3826395f270dccad30df36
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/text/load_text_token.py
@@ -0,0 +1,80 @@
+import torch
+
+
+class LoadTextTokens(object):
+ def __init__(self, tokenizer, max_text_len=40, padding='do_not_pad'):
+ self.tokenizer = tokenizer
+ self.max_text_len = max_text_len
+ self.padding = padding
+
+ def descriptions_to_text_tokens(self, target, begin_token):
+ target_encoding = self.tokenizer(
+ target, padding=self.padding,
+ add_special_tokens=False,
+ truncation=True, max_length=self.max_text_len)
+
+ need_predict = [1] * len(target_encoding['input_ids'])
+ payload = target_encoding['input_ids']
+ if len(payload) > self.max_text_len - 2:
+ payload = payload[-(self.max_text_len - 2):]
+ need_predict = payload[-(self.max_text_len - 2):]
+
+ input_ids = [begin_token] + payload + [self.tokenizer.sep_token_id]
+
+ need_predict = [0] + need_predict + [1]
+ data = {
+ 'text_tokens': torch.tensor(input_ids),
+ 'text_lengths': len(input_ids),
+ 'need_predict': torch.tensor(need_predict),
+ }
+
+ return data
+
+ def __call__(self, object_descriptions, box_features, begin_token):
+ text_tokens = []
+ text_lengths = []
+ need_predict = []
+ for description in object_descriptions:
+ tokens = self.descriptions_to_text_tokens(description, begin_token)
+ text_tokens.append(tokens['text_tokens'])
+ text_lengths.append(tokens['text_lengths'])
+ need_predict.append(tokens['need_predict'])
+
+ text_tokens = torch.cat(self.collate(text_tokens), dim=0).to(box_features.device)
+ text_lengths = torch.tensor(text_lengths).to(box_features.device)
+ need_predict = torch.cat(self.collate(need_predict), dim=0).to(box_features.device)
+
+ assert text_tokens.dim() == 2 and need_predict.dim() == 2
+ data = {'text_tokens': text_tokens,
+ 'text_lengths': text_lengths,
+ 'need_predict': need_predict}
+
+ return data
+
+ def collate(self, batch):
+ if all(isinstance(b, torch.Tensor) for b in batch) and len(batch) > 0:
+ if not all(b.shape == batch[0].shape for b in batch[1:]):
+ assert all(len(b.shape) == len(batch[0].shape) for b in batch[1:])
+ shape = torch.tensor([b.shape for b in batch])
+ max_shape = tuple(shape.max(dim=0)[0].tolist())
+ batch2 = []
+ for b in batch:
+ if any(c < m for c, m in zip(b.shape, max_shape)):
+ b2 = torch.zeros(max_shape, dtype=b.dtype, device=b.device)
+ if b.dim() == 1:
+ b2[:b.shape[0]] = b
+ elif b.dim() == 2:
+ b2[:b.shape[0], :b.shape[1]] = b
+ elif b.dim() == 3:
+ b2[:b.shape[0], :b.shape[1], :b.shape[2]] = b
+ else:
+ raise NotImplementedError
+ b = b2
+ batch2.append(b[None, ...])
+ else:
+ batch2 = []
+ for b in batch:
+ batch2.append(b[None, ...])
+ return batch2
+ else:
+ raise NotImplementedError
diff --git a/model/vision/grit_src/grit/modeling/text/modeling_bert.py b/model/vision/grit_src/grit/modeling/text/modeling_bert.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f8bf2d5d7552ee6c314da86a19a56eb0bdaa03e
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/text/modeling_bert.py
@@ -0,0 +1,529 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch BERT model. """
+# Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+import copy
+import os
+import json
+import logging
+import math
+import sys
+from io import open
+import torch
+from torch import nn
+import torch.utils.checkpoint as checkpoint
+from .file_utils import cached_path
+
+
+logger = logging.getLogger()
+
+
+BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
+ 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
+ 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
+ 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
+ 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
+ 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
+ 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
+ 'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
+ 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
+ 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
+ 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
+ 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
+ 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
+}
+
+
+def qk2attn(query, key, attention_mask, gamma):
+ query = query / gamma
+ attention_scores = torch.matmul(query, key.transpose(-1, -2))
+ if attention_mask is not None:
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
+ attention_scores = attention_scores + attention_mask
+ return attention_scores.softmax(dim=-1)
+
+
+class QK2Attention(nn.Module):
+ def forward(self, query, key, attention_mask, gamma):
+ return qk2attn(query, key, attention_mask, gamma)
+
+
+LayerNormClass = torch.nn.LayerNorm
+
+
+class BertSelfAttention(nn.Module):
+ def __init__(self, config):
+ super(BertSelfAttention, self).__init__()
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ "The hidden size (%d) is not a multiple of the number of attention "
+ "heads (%d)" % (config.hidden_size, config.num_attention_heads))
+ self.output_attentions = config.output_attentions
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.softmax = nn.Softmax(dim=-1)
+ self.qk2attn = QK2Attention()
+
+ def transpose_for_scores(self, x):
+ if torch._C._get_tracing_state():
+ # exporter is not smart enough to detect dynamic size for some paths
+ x = x.view(x.shape[0], -1, self.num_attention_heads, self.attention_head_size)
+ else:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(self, hidden_states, attention_mask, head_mask=None,
+ history_state=None):
+ if history_state is not None:
+ x_states = torch.cat([history_state, hidden_states], dim=1)
+ mixed_query_layer = self.query(hidden_states)
+ mixed_key_layer = self.key(x_states)
+ mixed_value_layer = self.value(x_states)
+ else:
+ mixed_query_layer = self.query(hidden_states)
+ mixed_key_layer = self.key(hidden_states)
+ mixed_value_layer = self.value(hidden_states)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+ key_layer = self.transpose_for_scores(mixed_key_layer)
+ value_layer = self.transpose_for_scores(mixed_value_layer)
+
+ attention_probs = self.qk2attn(query_layer, key_layer, attention_mask, math.sqrt(self.attention_head_size))
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
+ return outputs
+
+
+class BertSelfOutput(nn.Module):
+ def __init__(self, config):
+ super(BertSelfOutput, self).__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.pre_norm = hasattr(config, 'pre_norm') and config.pre_norm
+ if not self.pre_norm:
+ self.LayerNorm = LayerNormClass(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ if not self.pre_norm:
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ else:
+ hidden_states = hidden_states + input_tensor
+ return hidden_states
+
+
+class BertAttention(nn.Module):
+ def __init__(self, config):
+ super(BertAttention, self).__init__()
+ self.pre_norm = hasattr(config, 'pre_norm') and config.pre_norm
+ if self.pre_norm:
+ self.LayerNorm = LayerNormClass(config.hidden_size, eps=config.layer_norm_eps)
+ self.self = BertSelfAttention(config)
+ self.output = BertSelfOutput(config)
+
+ def forward(self, input_tensor, attention_mask, head_mask=None,
+ history_state=None):
+ if self.pre_norm:
+ self_outputs = self.self(self.LayerNorm(input_tensor), attention_mask, head_mask,
+ self.layerNorm(history_state) if history_state else history_state)
+ else:
+ self_outputs = self.self(input_tensor, attention_mask, head_mask,
+ history_state)
+ attention_output = self.output(self_outputs[0], input_tensor)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class BertIntermediate(nn.Module):
+ def __init__(self, config):
+ super(BertIntermediate, self).__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ assert config.hidden_act == 'gelu', 'Please implement other activation functions'
+ self.intermediate_act_fn = _gelu_python
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class BertOutput(nn.Module):
+ def __init__(self, config):
+ super(BertOutput, self).__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.pre_norm = hasattr(config, 'pre_norm') and config.pre_norm
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ if not self.pre_norm:
+ self.LayerNorm = LayerNormClass(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ if not self.pre_norm:
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ else:
+ hidden_states = hidden_states + input_tensor
+ return hidden_states
+
+
+class Mlp(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.pre_norm = hasattr(config, 'pre_norm') and config.pre_norm
+ self.intermediate = BertIntermediate(config)
+ if self.pre_norm:
+ self.LayerNorm = LayerNormClass(config.hidden_size, eps=config.layer_norm_eps)
+ self.output = BertOutput(config)
+
+ def forward(self, attention_output):
+ if not self.pre_norm:
+ intermediate_output = self.intermediate(attention_output)
+ else:
+ intermediate_output = self.intermediate(self.LayerNorm(attention_output))
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class BertLayer(nn.Module):
+ def __init__(self, config, use_act_checkpoint=True):
+ super(BertLayer, self).__init__()
+ self.pre_norm = hasattr(config, 'pre_norm') and config.pre_norm
+ self.use_mlp_wrapper = hasattr(config, 'use_mlp_wrapper') and config.use_mlp_wrapper
+ self.attention = BertAttention(config)
+ self.use_act_checkpoint = use_act_checkpoint
+ if self.use_mlp_wrapper:
+ self.mlp = Mlp(config)
+ else:
+ self.intermediate = BertIntermediate(config)
+ if self.pre_norm:
+ self.LayerNorm = LayerNormClass(config.hidden_size, eps=config.layer_norm_eps)
+ self.output = BertOutput(config)
+
+ def forward(self, hidden_states, attention_mask, head_mask=None,
+ history_state=None):
+ if self.use_act_checkpoint:
+ attention_outputs = checkpoint.checkpoint(self.attention, hidden_states,
+ attention_mask, head_mask, history_state)
+ else:
+ attention_outputs = self.attention(hidden_states, attention_mask,
+ head_mask, history_state)
+ attention_output = attention_outputs[0]
+ if self.use_mlp_wrapper:
+ layer_output = self.mlp(attention_output)
+ else:
+ if not self.pre_norm:
+ intermediate_output = self.intermediate(attention_output)
+ else:
+ intermediate_output = self.intermediate(self.LayerNorm(attention_output))
+ layer_output = self.output(intermediate_output, attention_output)
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class BertEncoder(nn.Module):
+ def __init__(self, config, use_act_checkpoint=True):
+ super(BertEncoder, self).__init__()
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.layer = nn.ModuleList([BertLayer(config, use_act_checkpoint=use_act_checkpoint) for _ in range(config.num_hidden_layers)])
+ self.pre_norm = hasattr(config, 'pre_norm') and config.pre_norm
+ if self.pre_norm:
+ self.LayerNorm = LayerNormClass(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states, attention_mask, head_mask=None,
+ encoder_history_states=None):
+ all_hidden_states = ()
+ all_attentions = ()
+ for i, layer_module in enumerate(self.layer):
+ if self.output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ history_state = None if encoder_history_states is None else encoder_history_states[i]
+ layer_outputs = layer_module(
+ hidden_states, attention_mask,
+ (None if head_mask is None else head_mask[i]),
+ history_state,
+ )
+ hidden_states = layer_outputs[0]
+
+ if self.output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+ if self.pre_norm:
+ hidden_states = self.LayerNorm(hidden_states)
+ outputs = (hidden_states,)
+ if self.output_hidden_states:
+ outputs = outputs + (all_hidden_states,)
+ if self.output_attentions:
+ outputs = outputs + (all_attentions,)
+ return outputs
+
+CONFIG_NAME = "config.json"
+
+class PretrainedConfig(object):
+ """ Base class for all configuration classes.
+ Handle a few common parameters and methods for loading/downloading/saving configurations.
+ """
+ pretrained_config_archive_map = {}
+
+ def __init__(self, **kwargs):
+ self.finetuning_task = kwargs.pop('finetuning_task', None)
+ self.num_labels = kwargs.pop('num_labels', 2)
+ self.output_attentions = kwargs.pop('output_attentions', False)
+ self.output_hidden_states = kwargs.pop('output_hidden_states', False)
+ self.torchscript = kwargs.pop('torchscript', False)
+
+ def save_pretrained(self, save_directory):
+ """ Save a configuration object to a directory, so that it
+ can be re-loaded using the `from_pretrained(save_directory)` class method.
+ """
+ assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
+
+ # If we save using the predefined names, we can load using `from_pretrained`
+ output_config_file = os.path.join(save_directory, CONFIG_NAME)
+
+ self.to_json_file(output_config_file)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ r""" Instantiate a PretrainedConfig from a pre-trained model configuration.
+
+ Params:
+ **pretrained_model_name_or_path**: either:
+ - a string with the `shortcut name` of a pre-trained model configuration to load from cache
+ or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
+ - a path to a `directory` containing a configuration file saved
+ using the `save_pretrained(save_directory)` method.
+ - a path or url to a saved configuration `file`.
+ **cache_dir**: (`optional`) string:
+ Path to a directory in which a downloaded pre-trained model
+ configuration should be cached if the standard cache should not be used.
+ **return_unused_kwargs**: (`optional`) bool:
+ - If False, then this function returns just the final configuration object.
+ - If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs`
+ is a dictionary consisting of the key/value pairs whose keys are not configuration attributes:
+ ie the part of kwargs which has not been used to update `config` and is otherwise ignored.
+ **kwargs**: (`optional`) dict:
+ Dictionary of key/value pairs with which to update the configuration object after loading.
+ - The values in kwargs of any keys which are configuration attributes will be used
+ to override the loaded values.
+ - Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
+ by the `return_unused_kwargs` keyword parameter.
+
+ Examples::
+
+ >>> config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
+ >>> config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
+ >>> config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
+ >>> config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
+ >>> assert config.output_attention == True
+ >>> config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
+ >>> foo=False, return_unused_kwargs=True)
+ >>> assert config.output_attention == True
+ >>> assert unused_kwargs == {'foo': False}
+
+ """
+ cache_dir = kwargs.pop('cache_dir', None)
+ return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
+
+ if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
+ config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path]
+ elif os.path.isdir(pretrained_model_name_or_path):
+ config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
+ else:
+ config_file = pretrained_model_name_or_path
+ # redirect to the cache, if necessary
+ try:
+ resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
+ except EnvironmentError:
+ if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
+ logger.error(
+ "Couldn't reach server at '{}' to download pretrained model configuration file.".format(
+ config_file))
+ else:
+ logger.error(
+ "Model name '{}' was not found in model name list ({}). "
+ "We assumed '{}' was a path or url but couldn't find any file "
+ "associated to this path or url.".format(
+ pretrained_model_name_or_path,
+ ', '.join(cls.pretrained_config_archive_map.keys()),
+ config_file))
+ return None
+ if resolved_config_file == config_file:
+ logger.info("loading configuration file {}".format(config_file))
+ else:
+ logger.info("loading configuration file {} from cache at {}".format(
+ config_file, resolved_config_file))
+
+ # Load config
+ config = cls.from_json_file(resolved_config_file)
+
+ # Update config with kwargs if needed
+ to_remove = []
+ for key, value in kwargs.items():
+ if hasattr(config, key):
+ setattr(config, key, value)
+ to_remove.append(key)
+ # add img_layer_norm_eps, use_img_layernorm
+ if "img_layer_norm_eps" in kwargs:
+ setattr(config, "img_layer_norm_eps", kwargs["img_layer_norm_eps"])
+ to_remove.append("img_layer_norm_eps")
+ if "use_img_layernorm" in kwargs:
+ setattr(config, "use_img_layernorm", kwargs["use_img_layernorm"])
+ to_remove.append("use_img_layernorm")
+ for key in to_remove:
+ kwargs.pop(key, None)
+
+ logger.info("Model config %s", config)
+ if return_unused_kwargs:
+ return config, kwargs
+ else:
+ return config
+
+ @classmethod
+ def from_dict(cls, json_object):
+ """Constructs a `Config` from a Python dictionary of parameters."""
+ config = cls(vocab_size_or_config_json_file=-1)
+ for key, value in json_object.items():
+ config.__dict__[key] = value
+ return config
+
+ @classmethod
+ def from_json_file(cls, json_file):
+ """Constructs a `BertConfig` from a json file of parameters."""
+ with open(json_file, "r", encoding='utf-8') as reader:
+ text = reader.read()
+ return cls.from_dict(json.loads(text))
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ return str(self.to_json_string())
+
+ def to_dict(self):
+ """Serializes this instance to a Python dictionary."""
+ output = copy.deepcopy(self.__dict__)
+ return output
+
+ def to_json_string(self):
+ """Serializes this instance to a JSON string."""
+ return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
+
+ def to_json_file(self, json_file_path):
+ """ Save this instance to a json file."""
+ with open(json_file_path, "w", encoding='utf-8') as writer:
+ writer.write(self.to_json_string())
+
+
+class BertConfig(PretrainedConfig):
+ r"""
+ :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
+ `BertModel`.
+
+
+ Arguments:
+ vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
+ hidden_size: Size of the encoder layers and the pooler layer.
+ num_hidden_layers: Number of hidden layers in the Transformer encoder.
+ num_attention_heads: Number of attention heads for each attention layer in
+ the Transformer encoder.
+ intermediate_size: The size of the "intermediate" (i.e., feed-forward)
+ layer in the Transformer encoder.
+ hidden_act: The non-linear activation function (function or string) in the
+ encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
+ hidden_dropout_prob: The dropout probabilitiy for all fully connected
+ layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob: The dropout ratio for the attention
+ probabilities.
+ max_position_embeddings: The maximum sequence length that this model might
+ ever be used with. Typically set this to something large just in case
+ (e.g., 512 or 1024 or 2048).
+ type_vocab_size: The vocabulary size of the `token_type_ids` passed into
+ `BertModel`.
+ initializer_range: The sttdev of the truncated_normal_initializer for
+ initializing all weight matrices.
+ layer_norm_eps: The epsilon used by LayerNorm.
+ """
+ pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
+
+ def __init__(self,
+ vocab_size_or_config_json_file=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ **kwargs):
+ super(BertConfig, self).__init__(**kwargs)
+ if isinstance(vocab_size_or_config_json_file, str):
+ with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
+ json_config = json.loads(reader.read())
+ for key, value in json_config.items():
+ self.__dict__[key] = value
+ elif isinstance(vocab_size_or_config_json_file, int):
+ self.vocab_size = vocab_size_or_config_json_file
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ else:
+ raise ValueError("First argument must be either a vocabulary size (int)"
+ "or the path to a pretrained model config file (str)")
+
+
+def _gelu_python(x):
+
+ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
\ No newline at end of file
diff --git a/model/vision/grit_src/grit/modeling/text/text_decoder.py b/model/vision/grit_src/grit/modeling/text/text_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..071baa7a52d21d7132cc492f070cba066d17aa43
--- /dev/null
+++ b/model/vision/grit_src/grit/modeling/text/text_decoder.py
@@ -0,0 +1,672 @@
+# Modified by Jialian Wu from
+# https://github.com/microsoft/GenerativeImage2Text/blob/main/generativeimage2text/layers/decoder.py
+# and https://github.com/kdexd/virtex
+from torch import nn
+import torch
+import functools
+from torch.nn import functional as F
+import warnings
+
+
+class TextualHead(nn.Module):
+ def __init__(self,
+ visual_feature_size: int, vocab_size: int, hidden_size: int):
+ super().__init__()
+ self.visual_feature_size = visual_feature_size
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+
+ @property
+ def textual_feature_size(self):
+ return self.hidden_size
+
+
+class WordAndPositionalEmbedding(nn.Module):
+ def __init__(
+ self,
+ vocab_size: int,
+ hidden_size: int,
+ dropout: float = 0.0,
+ max_caption_length: int = 30,
+ padding_idx: int = 0,
+ ):
+ super().__init__()
+ self.vocab_size = vocab_size
+ self.padding_idx = padding_idx
+
+ #self.words = nn.Embedding(vocab_size, hidden_size, padding_idx=padding_idx)
+ self.words = nn.Embedding(vocab_size, hidden_size)
+
+ # We provide no "padding index" for positional embeddings. We zero out
+ # the positional embeddings of padded positions as a post-processing.
+ self.positions = nn.Embedding(max_caption_length, hidden_size)
+ self.layer_norm = nn.LayerNorm(
+ hidden_size, eps=1e-8, elementwise_affine=True
+ )
+ self.dropout = nn.Dropout(p=dropout)
+
+ def forward(self, tokens: torch.Tensor):
+ position_indices = self._create_position_indices(tokens)
+
+ # shape: (batch_size, max_caption_length, hidden_size)
+ word_embeddings = self.words(tokens)
+ position_embeddings = self.positions(position_indices)
+
+ # shape: (batch_size, max_caption_length, hidden_size)
+ embeddings = self.layer_norm(word_embeddings + position_embeddings)
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+ @functools.lru_cache(maxsize=128)
+ def _create_position_indices(self, tokens: torch.Tensor):
+
+ # Create position indices of the same size as token indices.
+ batch_size, max_caption_length = tokens.size()
+ positions = torch.arange(
+ max_caption_length, dtype=tokens.dtype, device=tokens.device
+ )
+ # shape: (batch_size, max_caption_length)
+ positions = positions.unsqueeze(0).expand(batch_size, max_caption_length)
+ return positions
+
+
+class BertEncoderAsDecoder(nn.Module):
+ def __init__(self, encoder):
+ super().__init__()
+ self.encoder = encoder
+
+ def forward(self, tgt, memory,
+ tgt_mask=None,
+ tgt_key_padding_mask=None,
+ memory_key_padding_mask=None,
+ tgt_bi_valid_mask=None,
+ encoder_history_states=None,
+ ):
+ assert tgt_key_padding_mask is None, 'not supported'
+ assert tgt_mask.dim() == 2
+ assert tgt_mask.shape[0] == tgt_mask.shape[1]
+ # tgt_mask should always be 0/negative infinity
+ tgt = tgt.transpose(0, 1)
+ memory = memory.transpose(0, 1)
+
+ hidden_states = torch.cat((memory, tgt), dim=1)
+ num_tgt = tgt.shape[1]
+ num_memory = memory.shape[1]
+ device = tgt.device
+ dtype = tgt.dtype
+ top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
+ top_right = torch.full((num_memory, num_tgt), float('-inf'), device=tgt.device, dtype=dtype,)
+ bottom_left = torch.zeros((num_tgt, num_memory), dtype=dtype, device=tgt_mask.device,)
+ left = torch.cat((top_left, bottom_left), dim=0)
+ right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
+
+ full_attention_mask = torch.cat((left, right), dim=1)[None, :]
+
+ if memory_key_padding_mask is None:
+ memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
+ # if it is False, it means valid. That is, it is not a padding
+ assert memory_key_padding_mask.dtype == torch.bool
+ zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
+ zero_negative_infinity[memory_key_padding_mask] = float('-inf')
+ full_attention_mask = full_attention_mask.expand((memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + num_tgt))
+ full_attention_mask = full_attention_mask.clone()
+ origin_left = full_attention_mask[:, :, :num_memory]
+ update = zero_negative_infinity[:, None, :]
+ full_attention_mask[:, :, :num_memory] = origin_left + update
+
+ if tgt_bi_valid_mask is not None:
+ # verify the correctness
+ bs = full_attention_mask.shape[0]
+ # during inference, tgt_bi_valid_mask's length is not changed, but
+ # num_tgt can be increased
+ max_valid_target = tgt_bi_valid_mask.shape[1]
+ mask = tgt_bi_valid_mask[:, None, :].expand((bs, num_memory+num_tgt, max_valid_target))
+ full_attention_mask[:, :, num_memory:(num_memory+max_valid_target)][mask] = 0
+
+ # add axis for multi-head
+ full_attention_mask = full_attention_mask[:, None, :, :]
+
+ if encoder_history_states is None:
+ result = self.encoder(
+ hidden_states=hidden_states,
+ attention_mask=full_attention_mask,
+ encoder_history_states=encoder_history_states,
+ )
+ result = list(result)
+ result[0] = result[0][:, num_memory:].transpose(0, 1)
+ if self.encoder.output_hidden_states:
+ return result[0], result[1]
+ else:
+ # make it back-compatible
+ return result[0]
+ else:
+ encoder_out = self.encoder(
+ hidden_states=hidden_states[:, -1:],
+ attention_mask=full_attention_mask[:, :, -1:],
+ encoder_history_states=encoder_history_states,
+ )
+ result = encoder_out[0].transpose(0, 1)
+ if self.encoder.output_hidden_states:
+ return result, encoder_out[1]
+ else:
+ return result
+
+
+def create_transformer(decoder_type, norm_type,
+ textual_feature_size,
+ attention_heads,
+ feedforward_size,
+ dropout,
+ num_layers,
+ output_hidden_states=False,
+ use_mlp_wrapper=None,
+ use_act_checkpoint=True,
+ ):
+ assert norm_type in ['post', 'pre']
+ if decoder_type is None:
+ LayerClass = (
+ nn.TransformerDecoderLayer
+ if norm_type == "post"
+ else PreNormTransformerDecoderLayer
+ )
+ _layer = LayerClass(
+ textual_feature_size,
+ attention_heads,
+ dim_feedforward=feedforward_size,
+ dropout=dropout,
+ activation="gelu",
+ )
+ return nn.TransformerDecoder(_layer, num_layers)
+ elif decoder_type == 'bert_en':
+ from .modeling_bert import BertConfig, BertEncoder
+ config = BertConfig(
+ vocab_size_or_config_json_file=30522,
+ hidden_size=textual_feature_size,
+ num_hidden_layers=num_layers,
+ num_attention_heads=attention_heads,
+ intermediate_size=feedforward_size,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ layer_norm_eps=1e-12,
+ )
+ config.pre_norm = (norm_type == 'pre')
+ config.use_mlp_wrapper = use_mlp_wrapper
+ config.output_hidden_states = output_hidden_states
+ encoder = BertEncoder(config, use_act_checkpoint=use_act_checkpoint)
+ return BertEncoderAsDecoder(encoder)
+
+
+class PreNormTransformerDecoderLayer(nn.TransformerDecoderLayer):
+ def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
+ tgt_key_padding_mask=None, memory_key_padding_mask=None):
+ # fmt: off
+ # We use the members (modules) from super-class, just the order of
+ # operations is changed here. First layernorm, then attention.
+ tgt2 = self.norm1(tgt)
+ tgt2, _ = self.self_attn(
+ tgt2, tgt2, tgt2, attn_mask=tgt_mask,
+ key_padding_mask=tgt_key_padding_mask
+ )
+ tgt = tgt + self.dropout1(tgt2)
+
+ # Layernorm first, then decoder attention.
+ tgt2 = self.norm2(tgt)
+ tgt2, _ = self.multihead_attn(
+ tgt2, memory, memory, attn_mask=memory_mask,
+ key_padding_mask=memory_key_padding_mask
+ )
+ tgt = tgt + self.dropout2(tgt2)
+
+ # Layernorm first, then transformation through feedforward network.
+ tgt2 = self.norm3(tgt)
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
+ tgt = tgt + self.dropout3(tgt2)
+ return tgt
+
+
+class TransformerDecoderTextualHead(TextualHead):
+ def __init__(
+ self,
+ object_feature_size: int,
+ vocab_size: int,
+ hidden_size: int,
+ num_layers: int,
+ attention_heads: int,
+ feedforward_size: int,
+ dropout: float = 0.1,
+ norm_type: str = "post",
+ mask_future_positions: bool = True,
+ max_caption_length: int = 1024,
+ padding_idx: int = 0,
+ decoder_type=None,
+ not_tie_weight=None,
+ output_hidden_states=None,
+ use_mlp_wrapper=None,
+ use_act_checkpoint=True,
+ ):
+ super().__init__(object_feature_size, vocab_size, hidden_size)
+ self.num_layers = num_layers
+ self.attention_heads = attention_heads
+ self.feedforward_size = feedforward_size
+ self.dropout = dropout
+ assert mask_future_positions
+ self.padding_idx = padding_idx
+
+ self.object_feature_projection = nn.Sequential(
+ nn.Linear(object_feature_size, self.textual_feature_size),
+ nn.LayerNorm(self.textual_feature_size))
+
+ self.embedding = WordAndPositionalEmbedding(
+ self.vocab_size,
+ self.textual_feature_size,
+ dropout=dropout,
+ max_caption_length=max_caption_length,
+ padding_idx=padding_idx,
+ )
+ self.transformer = create_transformer(
+ decoder_type=decoder_type,
+ norm_type=norm_type,
+ textual_feature_size=self.textual_feature_size,
+ attention_heads=self.attention_heads,
+ feedforward_size=self.feedforward_size,
+ dropout=dropout,
+ num_layers=self.num_layers,
+ output_hidden_states=output_hidden_states,
+ use_mlp_wrapper=use_mlp_wrapper,
+ use_act_checkpoint=use_act_checkpoint,
+ )
+ self.apply(self._init_weights)
+
+ # Create an output linear layer and tie the input and output word
+ # embeddings to reduce parametejs.
+ self.output = nn.Linear(self.textual_feature_size, vocab_size)
+ if not not_tie_weight:
+ self.output.weight = self.embedding.words.weight
+
+ @staticmethod
+ def _init_weights(module):
+ """Initialize weights like BERT - N(0.0, 0.02), bias = 0."""
+
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=0.02)
+ elif isinstance(module, nn.MultiheadAttention):
+ module.in_proj_weight.data.normal_(mean=0.0, std=0.02)
+ module.out_proj.weight.data.normal_(mean=0.0, std=0.02)
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=0.02)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def forward(
+ self,
+ hidden_states,
+ text_tokens,
+ ):
+ projected_object_features = self.object_feature_projection(hidden_states) if hidden_states is not None else None
+ batch_size, max_text_length = text_tokens.size()
+ text_embeddings = self.embedding(text_tokens)
+
+ # An additive mask for masking the future (one direction).
+ uni_mask_zero_neg = self._generate_future_mask(
+ max_text_length, text_embeddings.dtype, text_embeddings.device
+ )
+
+ # We transpose the first two dimensions of tokens embeddings and visual
+ # features, as required by decoder.
+ text_embeddings = text_embeddings.transpose(0, 1)
+
+ projected_object_features = projected_object_features.transpose(0, 1)
+
+ # if transformer here is the pytorch/decoder, there is no chance, the
+ # output is always tensor
+ trans_out = self.transformer(
+ text_embeddings,
+ projected_object_features,
+ tgt_mask=uni_mask_zero_neg,
+ )
+ if isinstance(trans_out, tuple):
+ textual_features = trans_out[0]
+ else:
+ assert isinstance(trans_out, torch.Tensor)
+ textual_features = trans_out
+ # Undo the transpose and bring batch to dim 0.
+ # shape: (batch_size, max_caption_length, hidden_size)
+ textual_features = textual_features.transpose(0, 1)
+
+ # shape: (batch_size, max_caption_length, vocab_size)
+ output_logits = self.output(textual_features)
+ if isinstance(trans_out, tuple):
+ return output_logits, trans_out[1]
+ else:
+ return output_logits
+
+ def _generate_future_mask(
+ self, size: int, dtype: torch.dtype, device: torch.device
+ ):
+ # Default mask is for forward direction. Flip for backward direction.
+ mask = torch.triu(
+ torch.ones(size, size, device=device, dtype=dtype), diagonal=1
+ )
+ mask = mask.masked_fill(mask == 1, float("-inf"))
+ return mask
+
+
+class AutoRegressiveBeamSearch(object):
+ def __init__(
+ self,
+ end_token_id: int,
+ max_steps: int = 50,
+ beam_size: int = 5,
+ objectdet=True,
+ per_node_beam_size: int = 2,
+ ):
+ self._eos_index = end_token_id
+ self.max_steps = max_steps
+ self.beam_size = beam_size
+ self.objectdet = objectdet
+ self.per_node_beam_size = per_node_beam_size or beam_size
+
+ def search(self, begin_tokens, step):
+ if self.beam_size > 1 and self.objectdet:
+ only_return_best = False
+ else:
+ only_return_best = True
+
+ batch_size = begin_tokens.size()[0]
+
+ predictions = begin_tokens.unsqueeze(1).expand((batch_size, self.beam_size, begin_tokens.shape[-1]))
+ # Calculate the first timestep. This is done outside the main loop
+ # because we are going from a single decoder input (the output from the
+ # encoder) to the top `beam_size` decoder outputs. On the other hand,
+ # within the main loop we are going from the `beam_size` elements of the
+ # beam to `beam_size`^2 candidates from which we will select the top
+ # `beam_size` elements for the next iteration.
+ # shape: (batch_size, num_classes)
+ start_class_logits = step(begin_tokens)
+
+ # Convert logits to logprobs.
+ # shape: (batch_size * beam_size, vocab_size)
+ start_class_logprobs = F.log_softmax(start_class_logits, dim=1)
+
+ num_classes = start_class_logprobs.size()[1]
+
+ # shape: (batch_size, beam_size), (batch_size, beam_size)
+ start_top_logprobs, start_predicted_classes = start_class_logprobs.topk(
+ self.beam_size
+ )
+
+ if (
+ self.beam_size == 1
+ and (start_predicted_classes == self._eos_index).all()
+ ):
+ warnings.warn(
+ "Empty object description predicted. You may want to increase beam"
+ "size or ensure your step function is working properly.",
+ RuntimeWarning,
+ )
+ if only_return_best:
+ return start_predicted_classes, start_top_logprobs
+ else:
+ return start_predicted_classes.unsqueeze(-1), start_top_logprobs
+
+ # The log probs for the last time step.
+ # shape: (batch_size, beam_size)
+ last_logprobs = start_top_logprobs
+
+ # shape: (batch_size, beam_size, sequence_length)
+ predictions = torch.cat([predictions, start_predicted_classes.unsqueeze(-1)], dim=-1)
+
+ # Log probability tensor that mandates that the end token is selected.
+ # shape: (batch_size * beam_size, num_classes)
+ logprobs_after_end = start_class_logprobs.new_full(
+ (batch_size * self.beam_size, num_classes), float("-inf")
+ )
+ logprobs_after_end[:, self._eos_index] = 0.0
+
+ logits_after_end = start_class_logprobs.new_full(
+ (batch_size * self.beam_size, num_classes), float("-inf")
+ )
+ logits_after_end[:, self._eos_index] = 0
+
+ while predictions.shape[-1] < self.max_steps:
+ # shape: (batch_size * beam_size,)
+ last_predictions = predictions[:, :, -1].reshape(batch_size * self.beam_size)
+
+ # If every predicted token from the last step is `self._eos_index`,
+ # then we can stop early.
+ if (last_predictions == self._eos_index).all():
+ break
+
+ predictions_so_far = predictions.view(
+ batch_size * self.beam_size, -1
+ )
+ # shape: (batch_size * beam_size, num_classes)
+ class_logits = step(predictions_so_far)
+
+ # Set logprobs of last predicted tokens as high negative value to avoid
+ # repetition in description.
+ class_logits = class_logits.scatter(1, predictions_so_far[:, -1].view((-1, 1)), -10000)
+
+ # shape: (batch_size * beam_size, num_classes)
+ last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
+ batch_size * self.beam_size, num_classes
+ )
+
+ # Here we are finding any beams where we predicted the end token in
+ # the previous timestep and replacing the distribution with a
+ # one-hot distribution, forcing the beam to predict the end token
+ # this timestep as well.
+ class_logits = torch.where(
+ last_predictions_expanded == self._eos_index,
+ logits_after_end,
+ class_logits,
+ )
+
+ # Convert logits to logprobs.
+ # shape: (batch_size * beam_size, vocab_size)
+ class_logprobs = F.log_softmax(class_logits, dim=1)
+
+ # shape (both): (batch_size * beam_size, per_node_beam_size)
+ top_logprobs, predicted_classes = class_logprobs.topk(
+ self.per_node_beam_size
+ )
+
+ # Here we expand the last log probs to `(batch_size * beam_size,
+ # per_node_beam_size)` so that we can add them to the current log
+ # probs for this timestep. This lets us maintain the log
+ # probability of each element on the beam.
+ # shape: (batch_size * beam_size, per_node_beam_size)
+ expanded_last_logprobs = (
+ last_logprobs.unsqueeze(2)
+ .expand(batch_size, self.beam_size, self.per_node_beam_size)
+ .reshape(batch_size * self.beam_size, self.per_node_beam_size)
+ )
+ # shape: (batch_size * beam_size, per_node_beam_size)
+ summed_top_logprobs = top_logprobs + expanded_last_logprobs
+
+ # shape: (batch_size, beam_size * per_node_beam_size)
+ reshaped_summed = summed_top_logprobs.reshape(
+ batch_size, self.beam_size * self.per_node_beam_size
+ )
+ # shape: (batch_size, beam_size * per_node_beam_size)
+ reshaped_predicted_classes = predicted_classes.reshape(
+ batch_size, self.beam_size * self.per_node_beam_size
+ )
+ # Append the predictions to the current beam.
+ reshaped_beam = (
+ predictions.view(batch_size * self.beam_size, 1, -1)
+ .repeat(1, self.per_node_beam_size, 1)
+ .reshape(batch_size, self.beam_size * self.per_node_beam_size, -1)
+ )
+ # batch_size, (beam_size * per_node_beach_size), #token
+ reshaped_beam = torch.cat([reshaped_beam, reshaped_predicted_classes.unsqueeze(-1)], dim=-1)
+
+ # Keep only the top `beam_size` beam indices.
+ # shape: (batch_size, beam_size), (batch_size, beam_size)
+ restricted_beam_logprobs, restricted_beam_indices = reshaped_summed.topk(
+ self.beam_size
+ )
+ predictions = reshaped_beam.gather(
+ 1, restricted_beam_indices.unsqueeze(-1).repeat(1,1,reshaped_beam.shape[-1])
+ )
+
+ # shape: (batch_size, beam_size)
+ last_logprobs = restricted_beam_logprobs
+
+ if not torch.isfinite(last_logprobs).all():
+ warnings.warn(
+ "Infinite log probs encountered. Some final descriptions may not "
+ "make sense. This can happen when the beam size is larger than"
+ " the number of valid (non-zero probability) transitions that "
+ "the step function produces.",
+ RuntimeWarning,
+ )
+
+ # Optionally select best beam and its logprobs.
+ if only_return_best:
+ # shape: (batch_size, sequence_length)
+ predictions = predictions[:, 0, :]
+ last_logprobs = last_logprobs[:, 0]
+ num_valid = (predictions != self._eos_index).sum(dim=-1)
+ num_valid += (predictions == self._eos_index).sum(dim=-1) > 0
+ num_valid = num_valid - begin_tokens.shape[1]
+ num_valid = num_valid.clip(min=1)
+
+ last_logprobs = last_logprobs / num_valid
+
+ return predictions, last_logprobs
+
+
+class GRiTTextDecoder(nn.Module):
+ def __init__(
+ self,
+ transformer,
+ begin_token_id=101,
+ beamsearch_decode=None,
+ loss_type=None,
+ tokenizer=None,
+ ):
+ super().__init__()
+ self.textual = transformer
+ self.padding_idx = self.textual.padding_idx
+
+ self.begin_token_id = begin_token_id
+ self.beamsearch_decode = beamsearch_decode
+ self.tokenizer = tokenizer
+
+ if loss_type is None:
+ self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_idx)
+ elif loss_type == 'smooth':
+ self.loss = SmoothLabelCrossEntropyLoss(ignore_index=self.padding_idx)
+ else:
+ raise NotImplementedError(loss_type)
+
+ def forward(self, batch):
+ object_features = batch['object_features']
+
+ if self.training:
+ caption_token_input = batch["text_tokens"]
+
+ output_logits = self.textual(
+ object_features,
+ caption_token_input,
+ )
+
+ if 'need_predict' in batch:
+ # in place should also be good, but we do not choose that for
+ # safety as we may use it in prediction results in future
+ target = batch["text_tokens"].clone()
+ target[batch['need_predict'] == 0] = self.padding_idx
+ else:
+ target = batch["text_tokens"]
+
+ feat = output_logits[:, :-1].contiguous()
+ target = target[:, 1:].contiguous()
+ feat = feat.view(-1, self.textual.vocab_size)
+ target = target.view(-1)
+
+ valid_mask = target != self.padding_idx
+ target = target[valid_mask]
+ feat = feat[valid_mask]
+ loss = self.loss(feat, target)
+
+ return loss
+ else:
+ output_dict = self.infer(object_features)
+ return output_dict
+
+ def infer(self, object_features):
+ batch_size = object_features.size(0)
+ begin_tokens = object_features.new_full(
+ (batch_size, 1), self.begin_token_id
+ ).long()
+
+ decoding_step = functools.partial(
+ self.decoding_step, object_features
+ )
+
+ object_description_tokens, logprobs = self.beamsearch_decode.search(
+ begin_tokens, decoding_step
+ )
+
+ output_dict = {
+ 'predictions': object_description_tokens,
+ 'logprobs': logprobs,
+ }
+
+ return output_dict
+
+ def decoding_step(self, object_features, partial_text):
+ batch_size = object_features.shape[0]
+ beam_size = int(partial_text.size(0) / batch_size)
+ if beam_size > 1:
+ batch_size, num_token, channels = object_features.size()
+ object_features = object_features.unsqueeze(1).repeat(1, beam_size, 1, 1)
+ object_features = object_features.view(
+ batch_size * beam_size, num_token, channels
+ )
+
+ text_lengths = torch.ones_like(partial_text)
+ if len(text_lengths.size()) != 2:
+ partial_text = partial_text.unsqueeze(1)
+
+ # shape: (batch_size * beam_size, partial_caption_length, vocab_size)
+ logits = self.textual(
+ object_features,
+ partial_text,
+ )
+
+ return logits[:, -1, :].float()
+
+
+class SmoothLabelCrossEntropyLoss(nn.Module):
+ def __init__(self, eps=0.1, log_prefix='', ignore_index=None):
+ super().__init__()
+ self.eps = eps
+ self.log_soft = nn.LogSoftmax(dim=1)
+ self.kl = nn.KLDivLoss(reduction='none')
+
+ self.iter = 0
+ self.max_loss = 0
+ self.min_loss = 0
+ self.log_prefix = log_prefix
+ self.ignore_index = ignore_index
+
+ def forward(self, feature, target):
+ feature = feature.float()
+ if self.ignore_index is not None:
+ valid_mask = target != self.ignore_index
+ target = target[valid_mask]
+ feature = feature[valid_mask]
+ assert target.numel() > 0
+ self.iter += 1
+ eps = self.eps
+ n_class = feature.size(1)
+ one_hot = torch.zeros_like(feature).scatter(1, target.view(-1, 1), 1)
+ one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
+ log_prb = self.log_soft(feature)
+ loss = self.kl(log_prb, one_hot)
+ return loss.sum(dim=1).mean()
+
diff --git a/model/vision/grit_src/grit/predictor.py b/model/vision/grit_src/grit/predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c188ea2ab5fac232554d4eaaf2fb073670a70e4
--- /dev/null
+++ b/model/vision/grit_src/grit/predictor.py
@@ -0,0 +1,66 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Modified by Jialian Wu from https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/visualizer.py
+import torch
+
+from detectron2.engine.defaults import DefaultPredictor
+from detectron2.utils.visualizer import ColorMode, Visualizer
+
+
+class Visualizer_GRiT(Visualizer):
+ def __init__(self, image, instance_mode=None):
+ super().__init__(image, instance_mode=instance_mode)
+
+ def draw_instance_predictions(self, predictions):
+ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
+ scores = predictions.scores if predictions.has("scores") else None
+ classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None
+ object_description = predictions.pred_object_descriptions.data
+ # uncomment to output scores in visualized images
+ # object_description = [c + '|' + str(round(s.item(), 1)) for c, s in zip(object_description, scores)]
+
+ if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
+ colors = [
+ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
+ ]
+ alpha = 0.8
+ else:
+ colors = None
+ alpha = 0.5
+
+ if self._instance_mode == ColorMode.IMAGE_BW:
+ self.output.reset_image(
+ self._create_grayscale_image(
+ (predictions.pred_masks.any(dim=0) > 0).numpy()
+ if predictions.has("pred_masks")
+ else None
+ )
+ )
+ alpha = 0.3
+
+ self.overlay_instances(
+ masks=None,
+ boxes=boxes,
+ labels=object_description,
+ keypoints=None,
+ assigned_colors=colors,
+ alpha=alpha,
+ )
+ return self.output
+
+
+class VisualizationDemo(object):
+ def __init__(self, cfg, instance_mode=ColorMode.IMAGE):
+ self.cpu_device = torch.device("cpu")
+ self.instance_mode = instance_mode
+
+ self.predictor = DefaultPredictor(cfg)
+
+ def run_on_image(self, image):
+ predictions = self.predictor(image)
+ # Convert image from OpenCV BGR format to Matplotlib RGB format.
+ image = image[:, :, ::-1]
+ visualizer = Visualizer_GRiT(image, instance_mode=self.instance_mode)
+ instances = predictions["instances"].to(self.cpu_device)
+ vis_output = visualizer.draw_instance_predictions(predictions=instances)
+
+ return predictions, vis_output
\ No newline at end of file
diff --git a/model/vision/grit_src/image_dense_captions.py b/model/vision/grit_src/image_dense_captions.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1f98ff658b5ba2ef246ad2bb504ef88e10fca52
--- /dev/null
+++ b/model/vision/grit_src/image_dense_captions.py
@@ -0,0 +1,84 @@
+import sys
+
+from detectron2.config import get_cfg
+
+sys.path.insert(
+ 0, 'model/vision/grit_src/third_party/CenterNet2/projects/CenterNet2/')
+from model.vision.grit_src.third_party.CenterNet2.projects.CenterNet2.centernet.config import add_centernet_config
+from model.vision.grit_src.grit.config import add_grit_config
+
+from model.vision.grit_src.grit.predictor import VisualizationDemo
+
+# constants
+WINDOW_NAME = "GRiT"
+
+
+def dense_pred_to_caption_no_bbox(predictions):
+ object_description = predictions["instances"].pred_object_descriptions.data
+ new_caption = ""
+ for i in range(len(object_description) - 1):
+ new_caption += (object_description[i] + ", ")
+ new_caption += (object_description[-1] + ".")
+ return new_caption
+
+
+def dense_pred_to_caption(predictions):
+ boxes = predictions["instances"].pred_boxes if predictions[
+ "instances"].has("pred_boxes") else None
+ object_description = predictions["instances"].pred_object_descriptions.data
+ new_caption = ""
+ for i in range(len(object_description)):
+ new_caption += (object_description[i] + ": " + str(
+ [int(a)
+ for a in boxes[i].tensor.cpu().detach().numpy()[0]])) + "; "
+ return new_caption
+
+
+def setup_cfg(args):
+ cfg = get_cfg()
+ if args["cpu"]:
+ cfg.MODEL.DEVICE = "cpu"
+ add_centernet_config(cfg)
+ add_grit_config(cfg)
+ cfg.merge_from_file(args["config_file"])
+ cfg.merge_from_list(args["opts"])
+ # Set score_threshold for builtin models
+ cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args["confidence_threshold"]
+ cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args[
+ "confidence_threshold"]
+ if args["test_task"]:
+ cfg.MODEL.TEST_TASK = args["test_task"]
+ cfg.MODEL.BEAM_SIZE = 1
+ cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED = False
+ cfg.USE_ACT_CHECKPOINT = False
+ cfg.freeze()
+ return cfg
+
+
+def get_parser(device):
+ arg_dict = {
+ 'config_file':
+ "model/vision/grit_src/configs/GRiT_B_DenseCap_ObjectDet.yaml",
+ 'cpu':
+ False,
+ 'confidence_threshold':
+ 0.5,
+ 'test_task':
+ 'DenseCap',
+ 'opts':
+ ["MODEL.WEIGHTS", "pretrained_models/grit_b_densecap_objectdet.pth"]
+ }
+ if device == "cpu":
+ arg_dict["cpu"] = True
+ return arg_dict
+
+
+def image_caption_api(cv2_img, device='cuda'):
+ args2 = get_parser(device)
+ cfg = setup_cfg(args2)
+ demo = VisualizationDemo(cfg)
+
+ predictions, _ = demo.run_on_image(cv2_img)
+ new_caption = dense_pred_to_caption_no_bbox(predictions)
+
+ return new_caption
diff --git a/model/vision/grit_src/third_party/CenterNet2/.circleci/config.yml b/model/vision/grit_src/third_party/CenterNet2/.circleci/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..097afade9aa0a48b54ab7c7aae93058227901e7a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.circleci/config.yml
@@ -0,0 +1,256 @@
+version: 2.1
+
+# -------------------------------------------------------------------------------------
+# Environments to run the jobs in
+# -------------------------------------------------------------------------------------
+cpu: &cpu
+ machine:
+ image: ubuntu-2004:202107-02
+ resource_class: medium
+
+gpu: &gpu
+ machine:
+ # NOTE: use a cuda vesion that's supported by all our pytorch versions
+ image: ubuntu-1604-cuda-11.1:202012-01
+ resource_class: gpu.nvidia.small
+
+windows-cpu: &windows_cpu
+ machine:
+ resource_class: windows.medium
+ image: windows-server-2019-vs2019:stable
+ shell: powershell.exe
+
+# windows-gpu: &windows_gpu
+# machine:
+# resource_class: windows.gpu.nvidia.medium
+# image: windows-server-2019-nvidia:stable
+
+version_parameters: &version_parameters
+ parameters:
+ pytorch_version:
+ type: string
+ torchvision_version:
+ type: string
+ pytorch_index:
+ type: string
+ # use test wheels index to have access to RC wheels
+ # https://download.pytorch.org/whl/test/torch_test.html
+ default: "https://download.pytorch.org/whl/torch_stable.html"
+ python_version: # NOTE: only affect linux
+ type: string
+ default: '3.6.8'
+
+ environment:
+ PYTORCH_VERSION: << parameters.pytorch_version >>
+ TORCHVISION_VERSION: << parameters.torchvision_version >>
+ PYTORCH_INDEX: << parameters.pytorch_index >>
+ PYTHON_VERSION: << parameters.python_version>>
+ # point datasets to ~/.torch so it's cached in CI
+ DETECTRON2_DATASETS: ~/.torch/datasets
+
+# -------------------------------------------------------------------------------------
+# Re-usable commands
+# -------------------------------------------------------------------------------------
+# install_nvidia_driver: &install_nvidia_driver
+# - run:
+# name: Install nvidia driver
+# working_directory: ~/
+# command: |
+# wget -q 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
+# sudo /bin/bash ./NVIDIA-Linux-x86_64-430.40.run -s --no-drm
+# nvidia-smi
+
+add_ssh_keys: &add_ssh_keys
+ # https://circleci.com/docs/2.0/add-ssh-key/
+ - add_ssh_keys:
+ fingerprints:
+ - "e4:13:f2:22:d4:49:e8:e4:57:5a:ac:20:2f:3f:1f:ca"
+
+install_python: &install_python
+ - run:
+ name: Install Python
+ working_directory: ~/
+ command: |
+ # upgrade pyenv
+ cd /opt/circleci/.pyenv/plugins/python-build/../.. && git pull && cd -
+ pyenv install -s $PYTHON_VERSION
+ pyenv global $PYTHON_VERSION
+ python --version
+ which python
+ pip install --upgrade pip
+
+setup_venv: &setup_venv
+ - run:
+ name: Setup Virtual Env
+ working_directory: ~/
+ command: |
+ python -m venv ~/venv
+ echo ". ~/venv/bin/activate" >> $BASH_ENV
+ . ~/venv/bin/activate
+ python --version
+ which python
+ which pip
+ pip install --upgrade pip
+
+setup_venv_win: &setup_venv_win
+ - run:
+ name: Setup Virutal Env for Windows
+ command: |
+ pip install virtualenv
+ python -m virtualenv env
+ .\env\Scripts\activate
+ python --version
+ which python
+ which pip
+
+install_linux_dep: &install_linux_dep
+ - run:
+ name: Install Dependencies
+ command: |
+ # disable crash coredump, so unittests fail fast
+ sudo systemctl stop apport.service
+ # install from github to get latest; install iopath first since fvcore depends on it
+ pip install --progress-bar off -U 'git+https://github.com/facebookresearch/iopath'
+ pip install --progress-bar off -U 'git+https://github.com/facebookresearch/fvcore'
+ # Don't use pytest-xdist: cuda tests are unstable under multi-process workers.
+ pip install --progress-bar off ninja opencv-python-headless pytest tensorboard pycocotools
+ pip install --progress-bar off torch==$PYTORCH_VERSION -f $PYTORCH_INDEX
+ if [[ "$TORCHVISION_VERSION" == "master" ]]; then
+ pip install git+https://github.com/pytorch/vision.git
+ else
+ pip install --progress-bar off torchvision==$TORCHVISION_VERSION -f $PYTORCH_INDEX
+ fi
+
+ python -c 'import torch; print("CUDA:", torch.cuda.is_available())'
+ gcc --version
+
+install_detectron2: &install_detectron2
+ - run:
+ name: Install Detectron2
+ command: |
+ # Remove first, in case it's in the CI cache
+ pip uninstall -y detectron2
+ pip install --progress-bar off -e .[all]
+ python -m detectron2.utils.collect_env
+ ./datasets/prepare_for_tests.sh
+
+run_unittests: &run_unittests
+ - run:
+ name: Run Unit Tests
+ command: |
+ pytest -v --durations=15 tests # parallel causes some random failures
+
+# -------------------------------------------------------------------------------------
+# Jobs to run
+# -------------------------------------------------------------------------------------
+jobs:
+ linux_cpu_tests:
+ <<: *cpu
+ <<: *version_parameters
+
+ working_directory: ~/detectron2
+
+ steps:
+ - checkout
+
+ # Cache the venv directory that contains python, dependencies, and checkpoints
+ # Refresh the key when dependencies should be updated (e.g. when pytorch releases)
+ - restore_cache:
+ keys:
+ - cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210827
+
+ - <<: *install_python
+ - <<: *install_linux_dep
+ - <<: *install_detectron2
+ - <<: *run_unittests
+
+ - save_cache:
+ paths:
+ - /opt/circleci/.pyenv
+ - ~/.torch
+ key: cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210827
+
+
+ linux_gpu_tests:
+ <<: *gpu
+ <<: *version_parameters
+
+ working_directory: ~/detectron2
+
+ steps:
+ - checkout
+
+ - restore_cache:
+ keys:
+ - cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210827
+
+ - <<: *install_python
+ - <<: *install_linux_dep
+ - <<: *install_detectron2
+ - <<: *run_unittests
+
+ - save_cache:
+ paths:
+ - /opt/circleci/.pyenv
+ - ~/.torch
+ key: cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210827
+
+ windows_cpu_build:
+ <<: *windows_cpu
+ <<: *version_parameters
+ steps:
+ - <<: *add_ssh_keys
+ - checkout
+ - <<: *setup_venv_win
+
+ # Cache the env directory that contains dependencies
+ - restore_cache:
+ keys:
+ - cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210404
+
+ - run:
+ name: Install Dependencies
+ command: |
+ pip install certifi --ignore-installed # required on windows to workaround some cert issue
+ pip install numpy cython # required on windows before pycocotools
+ pip install opencv-python-headless pytest-xdist pycocotools tensorboard
+ pip install -U git+https://github.com/facebookresearch/iopath
+ pip install -U git+https://github.com/facebookresearch/fvcore
+ pip install torch==$env:PYTORCH_VERSION torchvision==$env:TORCHVISION_VERSION -f $env:PYTORCH_INDEX
+
+ - save_cache:
+ paths:
+ - env
+ key: cache-{{ arch }}-<< parameters.pytorch_version >>-{{ .Branch }}-20210404
+
+ - <<: *install_detectron2
+ # TODO: unittest fails for now
+
+workflows:
+ version: 2
+ regular_test:
+ jobs:
+ - linux_cpu_tests:
+ name: linux_cpu_tests_pytorch1.10
+ pytorch_version: '1.10.0+cpu'
+ torchvision_version: '0.11.1+cpu'
+ - linux_gpu_tests:
+ name: linux_gpu_tests_pytorch1.8
+ pytorch_version: '1.8.1+cu111'
+ torchvision_version: '0.9.1+cu111'
+ - linux_gpu_tests:
+ name: linux_gpu_tests_pytorch1.9
+ pytorch_version: '1.9+cu111'
+ torchvision_version: '0.10+cu111'
+ - linux_gpu_tests:
+ name: linux_gpu_tests_pytorch1.10
+ pytorch_version: '1.10+cu111'
+ torchvision_version: '0.11.1+cu111'
+ - linux_gpu_tests:
+ name: linux_gpu_tests_pytorch1.10_python39
+ pytorch_version: '1.10+cu111'
+ torchvision_version: '0.11.1+cu111'
+ python_version: '3.9.6'
+ - windows_cpu_build:
+ pytorch_version: '1.10+cpu'
+ torchvision_version: '0.11.1+cpu'
diff --git a/model/vision/grit_src/third_party/CenterNet2/.clang-format b/model/vision/grit_src/third_party/CenterNet2/.clang-format
new file mode 100644
index 0000000000000000000000000000000000000000..39b1b3d603ed0cf6b7f94c9c08067f148f35613f
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.clang-format
@@ -0,0 +1,85 @@
+AccessModifierOffset: -1
+AlignAfterOpenBracket: AlwaysBreak
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlinesLeft: true
+AlignOperands: false
+AlignTrailingComments: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: true
+BinPackArguments: false
+BinPackParameters: false
+BraceWrapping:
+ AfterClass: false
+ AfterControlStatement: false
+ AfterEnum: false
+ AfterFunction: false
+ AfterNamespace: false
+ AfterObjCDeclaration: false
+ AfterStruct: false
+ AfterUnion: false
+ BeforeCatch: false
+ BeforeElse: false
+ IndentBraces: false
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Attach
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: false
+ColumnLimit: 80
+CommentPragmas: '^ IWYU pragma:'
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DerivePointerAlignment: false
+DisableFormat: false
+ForEachMacros: [ FOR_EACH, FOR_EACH_R, FOR_EACH_RANGE, ]
+IncludeCategories:
+ - Regex: '^<.*\.h(pp)?>'
+ Priority: 1
+ - Regex: '^<.*'
+ Priority: 2
+ - Regex: '.*'
+ Priority: 3
+IndentCaseLabels: true
+IndentWidth: 2
+IndentWrappedFunctionNames: false
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: false
+PenaltyBreakBeforeFirstCallParameter: 1
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 200
+PointerAlignment: Left
+ReflowComments: true
+SortIncludes: true
+SpaceAfterCStyleCast: false
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeParens: ControlStatements
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp11
+TabWidth: 8
+UseTab: Never
diff --git a/model/vision/grit_src/third_party/CenterNet2/.flake8 b/model/vision/grit_src/third_party/CenterNet2/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..ae8edda517fa7e2adc003299f18c880a823b1343
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.flake8
@@ -0,0 +1,15 @@
+# This is an example .flake8 config, used when developing *Black* itself.
+# Keep in sync with setup.cfg which is used for source packages.
+
+[flake8]
+ignore = W503, E203, E221, C901, C408, E741, C407, B017
+max-line-length = 100
+max-complexity = 18
+select = B,C,E,F,W,T4,B9
+exclude = build
+per-file-ignores =
+ **/__init__.py:F401,F403,E402
+ **/configs/**.py:F401,E402
+ configs/**.py:F401,E402
+ **/tests/config/**.py:F401,E402
+ tests/config/**.py:F401,E402
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/CODE_OF_CONDUCT.md b/model/vision/grit_src/third_party/CenterNet2/.github/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000000000000000000000000000000000..0f7ad8bfc173eac554f0b6ef7c684861e8014bbe
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/CODE_OF_CONDUCT.md
@@ -0,0 +1,5 @@
+# Code of Conduct
+
+Facebook has adopted a Code of Conduct that we expect project participants to adhere to.
+Please read the [full text](https://code.fb.com/codeofconduct/)
+so that you can understand what actions will and will not be tolerated.
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/CONTRIBUTING.md b/model/vision/grit_src/third_party/CenterNet2/.github/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..9bab709cae689ba3b92dd52f7fbcc0c6926f4a38
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/CONTRIBUTING.md
@@ -0,0 +1,68 @@
+# Contributing to detectron2
+
+## Issues
+We use GitHub issues to track public bugs and questions.
+Please make sure to follow one of the
+[issue templates](https://github.com/facebookresearch/detectron2/issues/new/choose)
+when reporting any issues.
+
+Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
+disclosure of security bugs. In those cases, please go through the process
+outlined on that page and do not file a public issue.
+
+## Pull Requests
+We actively welcome pull requests.
+
+However, if you're adding any significant features (e.g. > 50 lines), please
+make sure to discuss with maintainers about your motivation and proposals in an issue
+before sending a PR. This is to save your time so you don't spend time on a PR that we'll not accept.
+
+We do not always accept new features, and we take the following
+factors into consideration:
+
+1. Whether the same feature can be achieved without modifying detectron2.
+ Detectron2 is designed so that you can implement many extensions from the outside, e.g.
+ those in [projects](https://github.com/facebookresearch/detectron2/tree/master/projects).
+ * If some part of detectron2 is not extensible enough, you can also bring up a more general issue to
+ improve it. Such feature request may be useful to more users.
+2. Whether the feature is potentially useful to a large audience (e.g. an impactful detection paper, a popular dataset,
+ a significant speedup, a widely useful utility),
+ or only to a small portion of users (e.g., a less-known paper, an improvement not in the object
+ detection field, a trick that's not very popular in the community, code to handle a non-standard type of data)
+ * Adoption of additional models, datasets, new task are by default not added to detectron2 before they
+ receive significant popularity in the community.
+ We sometimes accept such features in `projects/`, or as a link in `projects/README.md`.
+3. Whether the proposed solution has a good design / interface. This can be discussed in the issue prior to PRs, or
+ in the form of a draft PR.
+4. Whether the proposed solution adds extra mental/practical overhead to users who don't
+ need such feature.
+5. Whether the proposed solution breaks existing APIs.
+
+To add a feature to an existing function/class `Func`, there are always two approaches:
+(1) add new arguments to `Func`; (2) write a new `Func_with_new_feature`.
+To meet the above criteria, we often prefer approach (2), because:
+
+1. It does not involve modifying or potentially breaking existing code.
+2. It does not add overhead to users who do not need the new feature.
+3. Adding new arguments to a function/class is not scalable w.r.t. all the possible new research ideas in the future.
+
+When sending a PR, please do:
+
+1. If a PR contains multiple orthogonal changes, split it to several PRs.
+2. If you've added code that should be tested, add tests.
+3. For PRs that need experiments (e.g. adding a new model or new methods),
+ you don't need to update model zoo, but do provide experiment results in the description of the PR.
+4. If APIs are changed, update the documentation.
+5. We use the [Google style docstrings](https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html) in python.
+6. Make sure your code lints with `./dev/linter.sh`.
+
+
+## Contributor License Agreement ("CLA")
+In order to accept your pull request, we need you to submit a CLA. You only need
+to do this once to work on any of Facebook's open source projects.
+
+Complete your CLA here:
+
+## License
+By contributing to detectron2, you agree that your contributions will be licensed
+under the LICENSE file in the root directory of this source tree.
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/Detectron2-Logo-Horz.svg b/model/vision/grit_src/third_party/CenterNet2/.github/Detectron2-Logo-Horz.svg
new file mode 100644
index 0000000000000000000000000000000000000000..eb2d643ddd940cd8bdb5eaad093029969ff2364c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/Detectron2-Logo-Horz.svg
@@ -0,0 +1 @@
+Detectron2-Logo-Horz
\ No newline at end of file
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE.md b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000000000000000000000000000000000000..5e8aaa2d3722e7e73a3d94b2b7dfc4f751d7a240
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,5 @@
+
+Please select an issue template from
+https://github.com/facebookresearch/detectron2/issues/new/choose .
+
+Otherwise your issue will be closed.
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/bugs.md b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/bugs.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0235c708ab6b0cdadb5865110e9e8c22ca313aa
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/bugs.md
@@ -0,0 +1,38 @@
+---
+name: "🐛 Bugs"
+about: Report bugs in detectron2
+title: Please read & provide the following
+
+---
+
+## Instructions To Reproduce the 🐛 Bug:
+1. Full runnable code or full changes you made:
+```
+If making changes to the project itself, please use output of the following command:
+git rev-parse HEAD; git diff
+
+
+```
+2. What exact command you run:
+3. __Full logs__ or other relevant observations:
+```
+
+```
+4. please simplify the steps as much as possible so they do not require additional resources to
+ run, such as a private dataset.
+
+## Expected behavior:
+
+If there are no obvious error in "full logs" provided above,
+please tell us the expected behavior.
+
+## Environment:
+
+Provide your environment information using the following command:
+```
+wget -nc -q https://github.com/facebookresearch/detectron2/raw/main/detectron2/utils/collect_env.py && python collect_env.py
+```
+
+If your issue looks like an installation issue / environment issue,
+please first try to solve it yourself with the instructions in
+https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/config.yml b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c60c2e14309be9a93293a64e7481f2a91385f76a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,17 @@
+# require an issue template to be chosen
+blank_issues_enabled: false
+
+contact_links:
+ - name: How-To / All Other Questions
+ url: https://github.com/facebookresearch/detectron2/discussions
+ about: Use "github discussions" for community support on general questions that don't belong to the above issue categories
+ - name: Detectron2 Documentation
+ url: https://detectron2.readthedocs.io/index.html
+ about: Check if your question is answered in tutorials or API docs
+
+# Unexpected behaviors & bugs are split to two templates.
+# When they are one template, users think "it's not a bug" and don't choose the template.
+#
+# But the file name is still "unexpected-problems-bugs.md" so that old references
+# to this issue template still works.
+# It's ok since this template should be a superset of "bugs.md" (unexpected behaviors is a superset of bugs)
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/documentation.md b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/documentation.md
new file mode 100644
index 0000000000000000000000000000000000000000..88214d62e5228639491e019c78bb4171d535cdd1
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/documentation.md
@@ -0,0 +1,14 @@
+---
+name: "\U0001F4DA Documentation Issue"
+about: Report a problem about existing documentation, comments, website or tutorials.
+labels: documentation
+
+---
+
+## 📚 Documentation Issue
+
+This issue category is for problems about existing documentation, not for asking how-to questions.
+
+* Provide a link to an existing documentation/comment/tutorial:
+
+* How should the above documentation/comment/tutorial improve:
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/feature-request.md b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/feature-request.md
new file mode 100644
index 0000000000000000000000000000000000000000..03a1e93d7293948042120b875af8be0c6964e59c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/feature-request.md
@@ -0,0 +1,31 @@
+---
+name: "\U0001F680Feature Request"
+about: Suggest an improvement or new feature
+labels: enhancement
+
+---
+
+## 🚀 Feature
+A clear and concise description of the feature proposal.
+
+## Motivation & Examples
+
+Tell us why the feature is useful.
+
+Describe what the feature would look like, if it is implemented.
+Best demonstrated using **code examples** in addition to words.
+
+## Note
+
+We only consider adding new features if they are relevant to many users.
+
+If you request implementation of research papers -- we only consider papers that have enough significance and prevalance in the object detection field.
+
+We do not take requests for most projects in the `projects/` directory, because they are research code release that is mainly for other researchers to reproduce results.
+
+"Make X faster/accurate" is not a valid feature request. "Implement a concrete feature that can make X faster/accurate" can be a valid feature request.
+
+Instead of adding features inside detectron2,
+you can implement many features by [extending detectron2](https://detectron2.readthedocs.io/tutorials/extend.html).
+The [projects/](https://github.com/facebookresearch/detectron2/tree/main/projects/) directory contains many of such examples.
+
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md
new file mode 100644
index 0000000000000000000000000000000000000000..5db8f22415ff5c857ce83fb0d3de68211f775080
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md
@@ -0,0 +1,44 @@
+---
+name: "😩 Unexpected behaviors"
+about: Report unexpected behaviors when using detectron2
+title: Please read & provide the following
+
+---
+
+If you do not know the root cause of the problem, please post according to this template:
+
+## Instructions To Reproduce the Issue:
+
+Check https://stackoverflow.com/help/minimal-reproducible-example for how to ask good questions.
+Simplify the steps to reproduce the issue using suggestions from the above link, and provide them below:
+
+1. Full runnable code or full changes you made:
+```
+If making changes to the project itself, please use output of the following command:
+git rev-parse HEAD; git diff
+
+
+```
+2. What exact command you run:
+3. __Full logs__ or other relevant observations:
+```
+
+```
+
+## Expected behavior:
+
+If there are no obvious crash in "full logs" provided above,
+please tell us the expected behavior.
+
+If you expect a model to converge / work better, we do not help with such issues, unless
+a model fails to reproduce the results in detectron2 model zoo, or proves existence of bugs.
+
+## Environment:
+
+Paste the output of the following command:
+```
+wget -nc -nv https://github.com/facebookresearch/detectron2/raw/main/detectron2/utils/collect_env.py && python collect_env.py
+```
+
+If your issue looks like an installation issue / environment issue,
+please first check common issues in https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/pull_request_template.md b/model/vision/grit_src/third_party/CenterNet2/.github/pull_request_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..d71729baee1ec324ab9db6e7562965cf9e2a091b
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/pull_request_template.md
@@ -0,0 +1,10 @@
+Thanks for your contribution!
+
+If you're sending a large PR (e.g., >100 lines),
+please open an issue first about the feature / bug, and indicate how you want to contribute.
+
+We do not always accept features.
+See https://detectron2.readthedocs.io/notes/contributing.html#pull-requests about how we handle PRs.
+
+Before submitting a PR, please run `dev/linter.sh` to lint the code.
+
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/workflows/check-template.yml b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/check-template.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3caed9df3caa50c0d3b606e4a56a1959c463b710
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/check-template.yml
@@ -0,0 +1,86 @@
+name: Check issue template
+
+on:
+ issues:
+ types: [opened]
+
+jobs:
+ check-template:
+ runs-on: ubuntu-latest
+ # comment this out when testing with https://github.com/nektos/act
+ if: ${{ github.repository_owner == 'facebookresearch' }}
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/github-script@v3
+ with:
+ github-token: ${{secrets.GITHUB_TOKEN}}
+ script: |
+ // Arguments available:
+ // - github: A pre-authenticated octokit/rest.js client
+ // - context: An object containing the context of the workflow run
+ // - core: A reference to the @actions/core package
+ // - io: A reference to the @actions/io package
+ const fs = require('fs');
+ const editDistance = require(`${process.env.GITHUB_WORKSPACE}/.github/workflows/levenshtein.js`).getEditDistance
+ issue = await github.issues.get({
+ owner: context.issue.owner,
+ repo: context.issue.repo,
+ issue_number: context.issue.number,
+ });
+ const hasLabel = issue.data.labels.length > 0;
+ if (hasLabel || issue.state === "closed") {
+ // don't require template on them
+ core.debug("Issue " + issue.data.title + " was skipped.");
+ return;
+ }
+
+ sameAsTemplate = function(filename, body) {
+ let tmpl = fs.readFileSync(`.github/ISSUE_TEMPLATE/${filename}`, 'utf8');
+ tmpl = tmpl.toLowerCase().split("---").slice(2).join("").trim();
+ tmpl = tmpl.replace(/(\r\n|\n|\r)/gm, "");
+ let bodyr = body.replace(/(\r\n|\n|\r)/gm, "");
+ let dist = editDistance(tmpl, bodyr);
+ return dist < 8;
+ };
+
+ checkFail = async function(msg) {
+ core.info("Processing '" + issue.data.title + "' with message: " + msg);
+ await github.issues.addLabels({
+ owner: context.issue.owner,
+ repo: context.issue.repo,
+ issue_number: context.issue.number,
+ labels: ["needs-more-info"],
+ });
+ await github.issues.createComment({
+ owner: context.issue.owner,
+ repo: context.issue.repo,
+ issue_number: context.issue.number,
+ body: msg,
+ });
+ };
+
+ const body = issue.data.body.toLowerCase().trim();
+
+ if (sameAsTemplate("bugs.md", body) || sameAsTemplate("unexpected-problems-bugs.md", body)) {
+ await checkFail(`
+ We found that not enough information is provided about this issue.
+ Please provide details following the [issue template](https://github.com/facebookresearch/detectron2/issues/new/choose).`)
+ return;
+ }
+
+ const hasInstructions = body.indexOf("reproduce") != -1;
+ const hasEnvironment = (body.indexOf("environment") != -1) || (body.indexOf("colab") != -1) || (body.indexOf("docker") != -1);
+ if (hasInstructions && hasEnvironment) {
+ core.debug("Issue " + issue.data.title + " follows template.");
+ return;
+ }
+
+ let message = "You've chosen to report an unexpected problem or bug. Unless you already know the root cause of it, please include details about it by filling the [issue template](https://github.com/facebookresearch/detectron2/issues/new/choose).\n";
+ message += "The following information is missing: ";
+ if (!hasInstructions) {
+ message += "\"Instructions To Reproduce the Issue and __Full__ Logs\"; ";
+ }
+ if (!hasEnvironment) {
+ message += "\"Your Environment\"; ";
+ }
+ await checkFail(message);
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/workflows/levenshtein.js b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/levenshtein.js
new file mode 100644
index 0000000000000000000000000000000000000000..67a5e3613c0072d124035ee8933a23de2105cfe3
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/levenshtein.js
@@ -0,0 +1,44 @@
+/*
+Copyright (c) 2011 Andrei Mackenzie
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+// Compute the edit distance between the two given strings
+exports.getEditDistance = function(a, b){
+ if(a.length == 0) return b.length;
+ if(b.length == 0) return a.length;
+
+ var matrix = [];
+
+ // increment along the first column of each row
+ var i;
+ for(i = 0; i <= b.length; i++){
+ matrix[i] = [i];
+ }
+
+ // increment each column in the first row
+ var j;
+ for(j = 0; j <= a.length; j++){
+ matrix[0][j] = j;
+ }
+
+ // Fill in the rest of the matrix
+ for(i = 1; i <= b.length; i++){
+ for(j = 1; j <= a.length; j++){
+ if(b.charAt(i-1) == a.charAt(j-1)){
+ matrix[i][j] = matrix[i-1][j-1];
+ } else {
+ matrix[i][j] = Math.min(matrix[i-1][j-1] + 1, // substitution
+ Math.min(matrix[i][j-1] + 1, // insertion
+ matrix[i-1][j] + 1)); // deletion
+ }
+ }
+ }
+
+ return matrix[b.length][a.length];
+};
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/workflows/needs-reply.yml b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/needs-reply.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4affabd3498290a752fab6d848fc667758bedaf2
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/needs-reply.yml
@@ -0,0 +1,98 @@
+name: Close/Lock issues after inactivity
+
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+jobs:
+ close-issues-needs-more-info:
+ runs-on: ubuntu-latest
+ if: ${{ github.repository_owner == 'facebookresearch' }}
+ steps:
+ - name: Close old issues that need reply
+ uses: actions/github-script@v3
+ with:
+ github-token: ${{secrets.GITHUB_TOKEN}}
+ # Modified from https://github.com/dwieeb/needs-reply
+ script: |
+ // Arguments available:
+ // - github: A pre-authenticated octokit/rest.js client
+ // - context: An object containing the context of the workflow run
+ // - core: A reference to the @actions/core package
+ // - io: A reference to the @actions/io package
+ const kLabelToCheck = "needs-more-info";
+ const kInvalidLabel = "invalid/unrelated";
+ const kDaysBeforeClose = 7;
+ const kMessage = "Requested information was not provided in 7 days, so we're closing this issue.\n\nPlease open new issue if information becomes available. Otherwise, use [github discussions](https://github.com/facebookresearch/detectron2/discussions) for free-form discussions."
+
+ issues = await github.issues.listForRepo({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ state: 'open',
+ labels: kLabelToCheck,
+ sort: 'updated',
+ direction: 'asc',
+ per_page: 30,
+ page: 1,
+ });
+ issues = issues.data;
+ if (issues.length === 0) {
+ core.info('No more issues found to process. Exiting.');
+ return;
+ }
+ for (const issue of issues) {
+ if (!!issue.pull_request)
+ continue;
+ core.info(`Processing issue #${issue.number}`);
+
+ let updatedAt = new Date(issue.updated_at).getTime();
+ const numComments = issue.comments;
+ const comments = await github.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issue.number,
+ per_page: 30,
+ page: Math.floor((numComments - 1) / 30) + 1, // the last page
+ });
+ const lastComments = comments.data
+ .map(l => new Date(l.created_at).getTime())
+ .sort();
+ if (lastComments.length > 0) {
+ updatedAt = lastComments[lastComments.length - 1];
+ }
+
+ const now = new Date().getTime();
+ const daysSinceUpdated = (now - updatedAt) / 1000 / 60 / 60 / 24;
+
+ if (daysSinceUpdated < kDaysBeforeClose) {
+ core.info(`Skipping #${issue.number} because it has been updated in the last ${daysSinceUpdated} days`);
+ continue;
+ }
+ core.info(`Closing #${issue.number} because it has not been updated in the last ${daysSinceUpdated} days`);
+ await github.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issue.number,
+ body: kMessage,
+ });
+ const newLabels = numComments <= 2 ? [kInvalidLabel, kLabelToCheck] : issue.labels;
+ await github.issues.update({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issue.number,
+ labels: newLabels,
+ state: 'closed',
+ });
+ }
+
+ lock-issues-after-closed:
+ runs-on: ubuntu-latest
+ if: ${{ github.repository_owner == 'facebookresearch' }}
+ steps:
+ - name: Lock closed issues that have no activity for a while
+ uses: dessant/lock-threads@v2
+ with:
+ github-token: ${{ github.token }}
+ issue-lock-inactive-days: '300'
+ process-only: 'issues'
+ issue-exclude-labels: 'enhancement,bug,documentation'
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/workflows/remove-needs-reply.yml b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/remove-needs-reply.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1f000b28ca27ef9c219d197f95251be1cb8c0979
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/remove-needs-reply.yml
@@ -0,0 +1,25 @@
+name: Remove needs-more-info label
+
+on:
+ issue_comment:
+ types: [created]
+ issues:
+ types: [edited]
+
+jobs:
+ remove-needs-more-info-label:
+ runs-on: ubuntu-latest
+ # 1. issue_comment events could include PR comment, filter them out
+ # 2. Only trigger action if event was produced by the original author
+ if: ${{ !github.event.issue.pull_request && github.event.sender.login == github.event.issue.user.login }}
+ steps:
+ - name: Remove needs-more-info label
+ uses: octokit/request-action@v2.x
+ continue-on-error: true
+ with:
+ route: DELETE /repos/:repository/issues/:issue/labels/:label
+ repository: ${{ github.repository }}
+ issue: ${{ github.event.issue.number }}
+ label: needs-more-info
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/model/vision/grit_src/third_party/CenterNet2/.github/workflows/workflow.yml b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/workflow.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6085b32a503d264b0339b48a717ce7bde151f69c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.github/workflows/workflow.yml
@@ -0,0 +1,81 @@
+name: CI
+on: [push, pull_request]
+
+# Run linter with github actions for quick feedbacks.
+# Run macos tests with github actions. Linux (CPU & GPU) tests currently runs on CircleCI
+jobs:
+ linter:
+ runs-on: ubuntu-latest
+ # run on PRs, or commits to facebookresearch (not internal)
+ if: ${{ github.repository_owner == 'facebookresearch' || github.event_name == 'pull_request' }}
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.6
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.6
+ - name: Install dependencies
+ # flake8-bugbear flake8-comprehensions are useful but not available internally
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install flake8==3.8.1 isort==4.3.21
+ python -m pip install black==21.4b2
+ flake8 --version
+ - name: Lint
+ run: |
+ echo "Running isort"
+ isort -c -sp .
+ echo "Running black"
+ black -l 100 --check .
+ echo "Running flake8"
+ flake8 .
+
+ macos_tests:
+ runs-on: macos-latest
+ # run on PRs, or commits to facebookresearch (not internal)
+ if: ${{ github.repository_owner == 'facebookresearch' || github.event_name == 'pull_request' }}
+ strategy:
+ fail-fast: false
+ matrix:
+ torch: ["1.8", "1.9", "1.10"]
+ include:
+ - torch: "1.8"
+ torchvision: 0.9
+ - torch: "1.9"
+ torchvision: "0.10"
+ - torch: "1.10"
+ torchvision: "0.11.1"
+ env:
+ # point datasets to ~/.torch so it's cached by CI
+ DETECTRON2_DATASETS: ~/.torch/datasets
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Set up Python 3.6
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.6
+ - name: Cache dependencies
+ uses: actions/cache@v2
+ with:
+ path: |
+ ${{ env.pythonLocation }}/lib/python3.6/site-packages
+ ~/.torch
+ key: ${{ runner.os }}-torch${{ matrix.torch }}-${{ hashFiles('setup.py') }}-20210420
+
+ - name: Install dependencies
+ run: |
+ python -m pip install -U pip
+ python -m pip install ninja opencv-python-headless onnx pytest-xdist
+ python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
+ # install from github to get latest; install iopath first since fvcore depends on it
+ python -m pip install -U 'git+https://github.com/facebookresearch/iopath'
+ python -m pip install -U 'git+https://github.com/facebookresearch/fvcore'
+
+ - name: Build and install
+ run: |
+ CC=clang CXX=clang++ python -m pip install -e .[all]
+ python -m detectron2.utils.collect_env
+ ./datasets/prepare_for_tests.sh
+ - name: Run unittests
+ run: python -m pytest -n 4 --durations=15 -v tests/
diff --git a/model/vision/grit_src/third_party/CenterNet2/.gitignore b/model/vision/grit_src/third_party/CenterNet2/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8ca283cb5a1cf828456b3ed3b95e947ca1ba1426
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/.gitignore
@@ -0,0 +1,57 @@
+slurm*
+# output dir
+output
+instant_test_output
+inference_test_output
+
+
+*.png
+*.json
+*.diff
+# *.jpg
+!/projects/DensePose/doc/images/*.jpg
+
+# compilation and distribution
+__pycache__
+_ext
+*.pyc
+*.pyd
+*.so
+*.dll
+*.egg-info/
+build/
+dist/
+wheels/
+
+# pytorch/python/numpy formats
+*.pth
+*.pkl
+*.npy
+*.ts
+model_ts*.txt
+
+# ipython/jupyter notebooks
+*.ipynb
+**/.ipynb_checkpoints/
+
+# Editor temporaries
+*.swn
+*.swo
+*.swp
+*~
+
+# editor settings
+.idea
+.vscode
+_darcs
+
+# project dirs
+/detectron2/model_zoo/configs
+/datasets/*
+!/datasets/*.*
+!/datasets/lvis/
+/datasets/lvis/*
+!/datasets/lvis/lvis_v1_train_cat_info.json
+/projects/*/datasets
+/models
+/snippet
diff --git a/model/vision/grit_src/third_party/CenterNet2/GETTING_STARTED.md b/model/vision/grit_src/third_party/CenterNet2/GETTING_STARTED.md
new file mode 100644
index 0000000000000000000000000000000000000000..404b0c8f467264d1adf61e8274e5f864e24018e8
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/GETTING_STARTED.md
@@ -0,0 +1,79 @@
+## Getting Started with Detectron2
+
+This document provides a brief intro of the usage of builtin command-line tools in detectron2.
+
+For a tutorial that involves actual coding with the API,
+see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
+which covers how to run inference with an
+existing model, and how to train a builtin model on a custom dataset.
+
+
+### Inference Demo with Pre-trained Models
+
+1. Pick a model and its config file from
+ [model zoo](MODEL_ZOO.md),
+ for example, `mask_rcnn_R_50_FPN_3x.yaml`.
+2. We provide `demo.py` that is able to demo builtin configs. Run it with:
+```
+cd demo/
+python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
+ --input input1.jpg input2.jpg \
+ [--other-options]
+ --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl
+```
+The configs are made for training, therefore we need to specify `MODEL.WEIGHTS` to a model from model zoo for evaluation.
+This command will run the inference and show visualizations in an OpenCV window.
+
+For details of the command line arguments, see `demo.py -h` or look at its source code
+to understand its behavior. Some common arguments are:
+* To run __on your webcam__, replace `--input files` with `--webcam`.
+* To run __on a video__, replace `--input files` with `--video-input video.mp4`.
+* To run __on cpu__, add `MODEL.DEVICE cpu` after `--opts`.
+* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`.
+
+
+### Training & Evaluation in Command Line
+
+We provide two scripts in "tools/plain_train_net.py" and "tools/train_net.py",
+that are made to train all the configs provided in detectron2. You may want to
+use it as a reference to write your own training script.
+
+Compared to "train_net.py", "plain_train_net.py" supports fewer default
+features. It also includes fewer abstraction, therefore is easier to add custom
+logic.
+
+To train a model with "train_net.py", first
+setup the corresponding datasets following
+[datasets/README.md](./datasets/README.md),
+then run:
+```
+cd tools/
+./train_net.py --num-gpus 8 \
+ --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
+```
+
+The configs are made for 8-GPU training.
+To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.:
+```
+./train_net.py \
+ --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
+ --num-gpus 1 SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025
+```
+
+To evaluate a model's performance, use
+```
+./train_net.py \
+ --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
+ --eval-only MODEL.WEIGHTS /path/to/checkpoint_file
+```
+For more options, see `./train_net.py -h`.
+
+### Use Detectron2 APIs in Your Code
+
+See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
+to learn how to use detectron2 APIs to:
+1. run inference with an existing model
+2. train a builtin model on a custom dataset
+
+See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/main/projects)
+for more ways to build your project on detectron2.
diff --git a/model/vision/grit_src/third_party/CenterNet2/INSTALL.md b/model/vision/grit_src/third_party/CenterNet2/INSTALL.md
new file mode 100644
index 0000000000000000000000000000000000000000..b40768913742ca2b2e11c74d5944561931ecb326
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/INSTALL.md
@@ -0,0 +1,261 @@
+## Installation
+
+### Requirements
+- Linux or macOS with Python ≥ 3.6
+- PyTorch ≥ 1.8 and [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation.
+ Install them together at [pytorch.org](https://pytorch.org) to make sure of this
+- OpenCV is optional but needed by demo and visualization
+
+
+### Build Detectron2 from Source
+
+gcc & g++ ≥ 5.4 are required. [ninja](https://ninja-build.org/) is optional but recommended for faster build.
+After having them, run:
+```
+python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
+# (add --user if you don't have permission)
+
+# Or, to install it from a local clone:
+git clone https://github.com/facebookresearch/detectron2.git
+python -m pip install -e detectron2
+
+# On macOS, you may need to prepend the above commands with a few environment variables:
+CC=clang CXX=clang++ ARCHFLAGS="-arch x86_64" python -m pip install ...
+```
+
+To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the
+old build first. You often need to rebuild detectron2 after reinstalling PyTorch.
+
+### Install Pre-Built Detectron2 (Linux only)
+
+Choose from this table to install [v0.6 (Oct 2021)](https://github.com/facebookresearch/detectron2/releases):
+
+ CUDA torch 1.10 torch 1.9 torch 1.8 11.3 install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cu113/torch1.10/index.html
+
11.1 install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.10/index.html
+
install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.9/index.html
+
install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html
+
10.2 install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.10/index.html
+
install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html
+
install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.8/index.html
+
10.1 install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html
+
cpu install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.10/index.html
+
install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.9/index.html
+
install python -m pip install detectron2 -f \
+ https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html
+
+
+Note that:
+1. The pre-built packages have to be used with corresponding version of CUDA and the official package of PyTorch.
+ Otherwise, please build detectron2 from source.
+2. New packages are released every few months. Therefore, packages may not contain latest features in the main
+ branch and may not be compatible with the main branch of a research project that uses detectron2
+ (e.g. those in [projects](projects)).
+
+### Common Installation Issues
+
+Click each issue for its solutions:
+
+
+
+Undefined symbols that looks like "TH..","at::Tensor...","torch..."
+
+
+
+This usually happens when detectron2 or torchvision is not
+compiled with the version of PyTorch you're running.
+
+If the error comes from a pre-built torchvision, uninstall torchvision and pytorch and reinstall them
+following [pytorch.org](http://pytorch.org). So the versions will match.
+
+If the error comes from a pre-built detectron2, check [release notes](https://github.com/facebookresearch/detectron2/releases),
+uninstall and reinstall the correct pre-built detectron2 that matches pytorch version.
+
+If the error comes from detectron2 or torchvision that you built manually from source,
+remove files you built (`build/`, `**/*.so`) and rebuild it so it can pick up the version of pytorch currently in your environment.
+
+If the above instructions do not resolve this problem, please provide an environment (e.g. a dockerfile) that can reproduce the issue.
+
+
+
+
+Missing torch dynamic libraries, OR segmentation fault immediately when using detectron2.
+
+This usually happens when detectron2 or torchvision is not
+compiled with the version of PyTorch you're running. See the previous common issue for the solution.
+
+
+
+
+Undefined C++ symbols (e.g. "GLIBCXX..") or C++ symbols not found.
+
+
+Usually it's because the library is compiled with a newer C++ compiler but run with an old C++ runtime.
+
+This often happens with old anaconda.
+It may help to run `conda update libgcc` to upgrade its runtime.
+
+The fundamental solution is to avoid the mismatch, either by compiling using older version of C++
+compiler, or run the code with proper C++ runtime.
+To run the code with a specific C++ runtime, you can use environment variable `LD_PRELOAD=/path/to/libstdc++.so`.
+
+
+
+
+
+"nvcc not found" or "Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available".
+
+
+CUDA is not found when building detectron2.
+You should make sure
+
+```
+python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)'
+```
+
+print `(True, a directory with cuda)` at the time you build detectron2.
+
+Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config.
+
+
+
+
+"invalid device function" or "no kernel image is available for execution".
+
+
+Two possibilities:
+
+* You build detectron2 with one version of CUDA but run it with a different version.
+
+ To check whether it is the case,
+ use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
+ In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
+ to contain cuda libraries of the same version.
+
+ When they are inconsistent,
+ you need to either install a different build of PyTorch (or build by yourself)
+ to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
+
+* PyTorch/torchvision/Detectron2 is not built for the correct GPU SM architecture (aka. compute capability).
+
+ The architecture included by PyTorch/detectron2/torchvision is available in the "architecture flags" in
+ `python -m detectron2.utils.collect_env`. It must include
+ the architecture of your GPU, which can be found at [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus).
+
+ If you're using pre-built PyTorch/detectron2/torchvision, they have included support for most popular GPUs already.
+ If not supported, you need to build them from source.
+
+ When building detectron2/torchvision from source, they detect the GPU device and build for only the device.
+ This means the compiled code may not work on a different GPU device.
+ To recompile them for the correct architecture, remove all installed/compiled files,
+ and rebuild them with the `TORCH_CUDA_ARCH_LIST` environment variable set properly.
+ For example, `export TORCH_CUDA_ARCH_LIST="6.0;7.0"` makes it compile for both P100s and V100s.
+
+
+
+
+Undefined CUDA symbols; Cannot open libcudart.so
+
+
+The version of NVCC you use to build detectron2 or torchvision does
+not match the version of CUDA you are running with.
+This often happens when using anaconda's CUDA runtime.
+
+Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
+In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
+to contain cuda libraries of the same version.
+
+When they are inconsistent,
+you need to either install a different build of PyTorch (or build by yourself)
+to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
+
+
+
+
+
+C++ compilation errors from NVCC / NVRTC, or "Unsupported gpu architecture"
+
+
+A few possibilities:
+
+1. Local CUDA/NVCC version has to match the CUDA version of your PyTorch. Both can be found in `python collect_env.py`.
+ When they are inconsistent, you need to either install a different build of PyTorch (or build by yourself)
+ to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
+
+2. Local CUDA/NVCC version shall support the SM architecture (a.k.a. compute capability) of your GPU.
+ The capability of your GPU can be found at [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus).
+ The capability supported by NVCC is listed at [here](https://gist.github.com/ax3l/9489132).
+ If your NVCC version is too old, this can be workaround by setting environment variable
+ `TORCH_CUDA_ARCH_LIST` to a lower, supported capability.
+
+3. The combination of NVCC and GCC you use is incompatible. You need to change one of their versions.
+ See [here](https://gist.github.com/ax3l/9489132) for some valid combinations.
+ Notably, CUDA<=10.1.105 doesn't support GCC>7.3.
+
+ The CUDA/GCC version used by PyTorch can be found by `print(torch.__config__.show())`.
+
+
+
+
+
+
+"ImportError: cannot import name '_C'".
+
+
+Please build and install detectron2 following the instructions above.
+
+Or, if you are running code from detectron2's root directory, `cd` to a different one.
+Otherwise you may not import the code that you installed.
+
+
+
+
+
+Any issue on windows.
+
+
+
+Detectron2 is continuously built on windows with [CircleCI](https://app.circleci.com/pipelines/github/facebookresearch/detectron2?branch=main).
+However we do not provide official support for it.
+PRs that improves code compatibility on windows are welcome.
+
+
+
+
+ONNX conversion segfault after some "TraceWarning".
+
+
+The ONNX package is compiled with a too old compiler.
+
+Please build and install ONNX from its source code using a compiler
+whose version is closer to what's used by PyTorch (available in `torch.__config__.show()`).
+
+
+
+
+
+"library not found for -lstdc++" on older version of MacOS
+
+
+See
+[this stackoverflow answer](https://stackoverflow.com/questions/56083725/macos-build-issues-lstdc-not-found-while-building-python-package).
+
+
+
+
+### Installation inside specific environments:
+
+* __Colab__: see our [Colab Tutorial](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
+ which has step-by-step instructions.
+
+* __Docker__: The official [Dockerfile](docker) installs detectron2 with a few simple commands.
+
diff --git a/model/vision/grit_src/third_party/CenterNet2/LICENSE b/model/vision/grit_src/third_party/CenterNet2/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..cd1b070674331757508398d99c830664dce6eaec
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction,
+and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by
+the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all
+other entities that control, are controlled by, or are under common
+control with that entity. For the purposes of this definition,
+"control" means (i) the power, direct or indirect, to cause the
+direction or management of such entity, whether by contract or
+otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity
+exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation
+source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical
+transformation or translation of a Source form, including but
+not limited to compiled object code, generated documentation,
+and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or
+Object form, made available under the License, as indicated by a
+copyright notice that is included in or attached to the work
+(an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object
+form, that is based on (or derived from) the Work and for which the
+editorial revisions, annotations, elaborations, or other modifications
+represent, as a whole, an original work of authorship. For the purposes
+of this License, Derivative Works shall not include works that remain
+separable from, or merely link (or bind by name) to the interfaces of,
+the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including
+the original version of the Work and any modifications or additions
+to that Work or Derivative Works thereof, that is intentionally
+submitted to Licensor for inclusion in the Work by the copyright owner
+or by an individual or Legal Entity authorized to submit on behalf of
+the copyright owner. For the purposes of this definition, "submitted"
+means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems,
+and issue tracking systems that are managed by, or on behalf of, the
+Licensor for the purpose of discussing and improving the Work, but
+excluding communication that is conspicuously marked or otherwise
+designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity
+on behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the
+Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+(except as stated in this section) patent license to make, have made,
+use, offer to sell, sell, import, and otherwise transfer the Work,
+where such license applies only to those patent claims licensable
+by such Contributor that are necessarily infringed by their
+Contribution(s) alone or by combination of their Contribution(s)
+with the Work to which such Contribution(s) was submitted. If You
+institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work
+or a Contribution incorporated within the Work constitutes direct
+or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate
+as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+Work or Derivative Works thereof in any medium, with or without
+modifications, and in Source or Object form, provided that You
+meet the following conditions:
+
+(a) You must give any other recipients of the Work or
+Derivative Works a copy of this License; and
+
+(b) You must cause any modified files to carry prominent notices
+stating that You changed the files; and
+
+(c) You must retain, in the Source form of any Derivative Works
+that You distribute, all copyright, patent, trademark, and
+attribution notices from the Source form of the Work,
+excluding those notices that do not pertain to any part of
+the Derivative Works; and
+
+(d) If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must
+include a readable copy of the attribution notices contained
+within such NOTICE file, excluding those notices that do not
+pertain to any part of the Derivative Works, in at least one
+of the following places: within a NOTICE text file distributed
+as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or,
+within a display generated by the Derivative Works, if and
+wherever such third-party notices normally appear. The contents
+of the NOTICE file are for informational purposes only and
+do not modify the License. You may add Your own attribution
+notices within Derivative Works that You distribute, alongside
+or as an addendum to the NOTICE text from the Work, provided
+that such additional attribution notices cannot be construed
+as modifying the License.
+
+You may add Your own copyright statement to Your modifications and
+may provide additional or different license terms and conditions
+for use, reproduction, or distribution of Your modifications, or
+for any such Derivative Works as a whole, provided Your use,
+reproduction, and distribution of the Work otherwise complies with
+the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+any Contribution intentionally submitted for inclusion in the Work
+by You to the Licensor shall be under the terms and conditions of
+this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify
+the terms of any separate license agreement you may have executed
+with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor,
+except as required for reasonable and customary use in describing the
+origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+agreed to in writing, Licensor provides the Work (and each
+Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied, including, without limitation, any warranties or conditions
+of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any
+risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+whether in tort (including negligence), contract, or otherwise,
+unless required by applicable law (such as deliberate and grossly
+negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special,
+incidental, or consequential damages of any character arising as a
+result of this License or out of the use or inability to use the
+Work (including but not limited to damages for loss of goodwill,
+work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor
+has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+the Work or Derivative Works thereof, You may choose to offer,
+and charge a fee for, acceptance of support, warranty, indemnity,
+or other liability obligations and/or rights consistent with this
+License. However, in accepting such obligations, You may act only
+on Your own behalf and on Your sole responsibility, not on behalf
+of any other Contributor, and only if You agree to indemnify,
+defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason
+of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following
+boilerplate notice, with the fields enclosed by brackets "[]"
+replaced with your own identifying information. (Don't include
+the brackets!) The text should be enclosed in the appropriate
+comment syntax for the file format. We also recommend that a
+file or class name and description of purpose be included on the
+same "printed page" as the copyright notice for easier
+identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/model/vision/grit_src/third_party/CenterNet2/MODEL_ZOO.md b/model/vision/grit_src/third_party/CenterNet2/MODEL_ZOO.md
new file mode 100644
index 0000000000000000000000000000000000000000..69db2728563c680e89a0d5d3e6ba272b8d78bdbd
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/MODEL_ZOO.md
@@ -0,0 +1,1052 @@
+# Detectron2 Model Zoo and Baselines
+
+## Introduction
+
+This file documents a large collection of baselines trained
+with detectron2 in Sep-Oct, 2019.
+All numbers were obtained on [Big Basin](https://engineering.fb.com/data-center-engineering/introducing-big-basin-our-next-generation-ai-hardware/)
+servers with 8 NVIDIA V100 GPUs & NVLink. The speed numbers are periodically updated with latest PyTorch/CUDA/cuDNN versions.
+You can access these models from code using [detectron2.model_zoo](https://detectron2.readthedocs.io/modules/model_zoo.html) APIs.
+
+In addition to these official baseline models, you can find more models in [projects/](projects/).
+
+#### How to Read the Tables
+* The "Name" column contains a link to the config file. Models can be reproduced using `tools/train_net.py` with the corresponding yaml config file,
+ or `tools/lazyconfig_train_net.py` for python config files.
+* Training speed is averaged across the entire training.
+ We keep updating the speed with latest version of detectron2/pytorch/etc.,
+ so they might be different from the `metrics` file.
+ Training speed for multi-machine jobs is not provided.
+* Inference speed is measured by `tools/train_net.py --eval-only`, or [inference_on_dataset()](https://detectron2.readthedocs.io/modules/evaluation.html#detectron2.evaluation.inference_on_dataset),
+ with batch size 1 in detectron2 directly.
+ Measuring it with custom code may introduce other overhead.
+ Actual deployment in production should in general be faster than the given inference
+ speed due to more optimizations.
+* The *model id* column is provided for ease of reference.
+ To check downloaded file integrity, any model on this page contains its md5 prefix in its file name.
+* Training curves and other statistics can be found in `metrics` for each model.
+
+#### Common Settings for COCO Models
+* All COCO models were trained on `train2017` and evaluated on `val2017`.
+* The default settings are __not directly comparable__ with Detectron's standard settings.
+ For example, our default training data augmentation uses scale jittering in addition to horizontal flipping.
+
+ To make fair comparisons with Detectron's settings, see
+ [Detectron1-Comparisons](configs/Detectron1-Comparisons/) for accuracy comparison,
+ and [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html)
+ for speed comparison.
+* For Faster/Mask R-CNN, we provide baselines based on __3 different backbone combinations__:
+ * __FPN__: Use a ResNet+FPN backbone with standard conv and FC heads for mask and box prediction,
+ respectively. It obtains the best
+ speed/accuracy tradeoff, but the other two are still useful for research.
+ * __C4__: Use a ResNet conv4 backbone with conv5 head. The original baseline in the Faster R-CNN paper.
+ * __DC5__ (Dilated-C5): Use a ResNet conv5 backbone with dilations in conv5, and standard conv and FC heads
+ for mask and box prediction, respectively.
+ This is used by the Deformable ConvNet paper.
+* Most models are trained with the 3x schedule (~37 COCO epochs).
+ Although 1x models are heavily under-trained, we provide some ResNet-50 models with the 1x (~12 COCO epochs)
+ training schedule for comparison when doing quick research iteration.
+
+#### ImageNet Pretrained Models
+
+It's common to initialize from backbone models pre-trained on ImageNet classification tasks. The following backbone models are available:
+
+* [R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl): converted copy of [MSRA's original ResNet-50](https://github.com/KaimingHe/deep-residual-networks) model.
+* [R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl): converted copy of [MSRA's original ResNet-101](https://github.com/KaimingHe/deep-residual-networks) model.
+* [X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl): ResNeXt-101-32x8d model trained with Caffe2 at FB.
+* [R-50.pkl (torchvision)](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/torchvision/R-50.pkl): converted copy of [torchvision's ResNet-50](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.resnet50) model.
+ More details can be found in [the conversion script](tools/convert-torchvision-to-d2.py).
+
+Note that the above models have __different__ format from those provided in Detectron: we do not fuse BatchNorm into an affine layer.
+Pretrained models in Detectron's format can still be used. For example:
+* [X-152-32x8d-IN5k.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl):
+ ResNeXt-152-32x8d model trained on ImageNet-5k with Caffe2 at FB (see ResNeXt paper for details on ImageNet-5k).
+* [R-50-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47261647/R-50-GN.pkl):
+ ResNet-50 with Group Normalization.
+* [R-101-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47592356/R-101-GN.pkl):
+ ResNet-101 with Group Normalization.
+
+These models require slightly different settings regarding normalization and architecture. See the model zoo configs for reference.
+
+#### License
+
+All models available for download through this document are licensed under the
+[Creative Commons Attribution-ShareAlike 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/).
+
+### COCO Object Detection Baselines
+
+#### Faster R-CNN:
+
+
+
+
+
+#### RetinaNet:
+
+
+
+
+
+Name
+lr sched
+train time (s/iter)
+inference time (s/im)
+train mem (GB)
+box AP
+model id
+download
+
+
+ R50
+1x
+0.205
+0.041
+4.1
+37.4
+190397773
+model | metrics
+
+
+ R50
+3x
+0.205
+0.041
+4.1
+38.7
+190397829
+model | metrics
+
+
+ R101
+3x
+0.291
+0.054
+5.2
+40.4
+190397697
+model | metrics
+
+
+
+
+#### RPN & Fast R-CNN:
+
+
+
+
+### COCO Instance Segmentation Baselines with Mask R-CNN
+
+
+
+
+
+
+
+
+#### New baselines using Large-Scale Jitter and Longer Training Schedule
+
+The following baselines of COCO Instance Segmentation with Mask R-CNN are generated
+using a longer training schedule and large-scale jitter as described in Google's
+[Simple Copy-Paste Data Augmentation](https://arxiv.org/pdf/2012.07177.pdf) paper. These
+models are trained from scratch using random initialization. These baselines exceed the
+previous Mask R-CNN baselines.
+
+In the following table, one epoch consists of training on 118000 COCO images.
+
+
+
+### COCO Person Keypoint Detection Baselines with Keypoint R-CNN
+
+
+
+
+
+
+Name
+lr sched
+train time (s/iter)
+inference time (s/im)
+train mem (GB)
+box AP
+kp. AP
+model id
+download
+
+
+ R50-FPN
+1x
+0.315
+0.072
+5.0
+53.6
+64.0
+137261548
+model | metrics
+
+
+ R50-FPN
+3x
+0.316
+0.066
+5.0
+55.4
+65.5
+137849621
+model | metrics
+
+
+ R101-FPN
+3x
+0.390
+0.076
+6.1
+56.4
+66.1
+138363331
+model | metrics
+
+
+ X101-FPN
+3x
+0.738
+0.121
+8.7
+57.3
+66.0
+139686956
+model | metrics
+
+
+
+### COCO Panoptic Segmentation Baselines with Panoptic FPN
+
+
+
+
+
+
+Name
+lr sched
+train time (s/iter)
+inference time (s/im)
+train mem (GB)
+box AP
+mask AP
+PQ
+model id
+download
+
+
+ R50-FPN
+1x
+0.304
+0.053
+4.8
+37.6
+34.7
+39.4
+139514544
+model | metrics
+
+
+ R50-FPN
+3x
+0.302
+0.053
+4.8
+40.0
+36.5
+41.5
+139514569
+model | metrics
+
+
+ R101-FPN
+3x
+0.392
+0.066
+6.0
+42.4
+38.5
+43.0
+139514519
+model | metrics
+
+
+
+
+### LVIS Instance Segmentation Baselines with Mask R-CNN
+
+Mask R-CNN baselines on the [LVIS dataset](https://lvisdataset.org), v0.5.
+These baselines are described in Table 3(c) of the [LVIS paper](https://arxiv.org/abs/1908.03195).
+
+NOTE: the 1x schedule here has the same amount of __iterations__ as the COCO 1x baselines.
+They are roughly 24 epochs of LVISv0.5 data.
+The final results of these configs have large variance across different runs.
+
+
+
+
+
+
+
+Name
+lr sched
+train time (s/iter)
+inference time (s/im)
+train mem (GB)
+box AP
+mask AP
+model id
+download
+
+
+ R50-FPN
+1x
+0.292
+0.107
+7.1
+23.6
+24.4
+144219072
+model | metrics
+
+
+ R101-FPN
+1x
+0.371
+0.114
+7.8
+25.6
+25.9
+144219035
+model | metrics
+
+
+ X101-FPN
+1x
+0.712
+0.151
+10.2
+26.7
+27.1
+144219108
+model | metrics
+
+
+
+
+
+### Cityscapes & Pascal VOC Baselines
+
+Simple baselines for
+* Mask R-CNN on Cityscapes instance segmentation (initialized from COCO pre-training, then trained on Cityscapes fine annotations only)
+* Faster R-CNN on PASCAL VOC object detection (trained on VOC 2007 train+val + VOC 2012 train+val, tested on VOC 2007 using 11-point interpolated AP)
+
+
+
+
+
+
+
+Name
+train time (s/iter)
+inference time (s/im)
+train mem (GB)
+box AP
+box AP50
+mask AP
+model id
+download
+
+
+ R50-FPN, Cityscapes
+0.240
+0.078
+4.4
+
+
+36.5
+142423278
+model | metrics
+
+
+ R50-C4, VOC
+0.537
+0.081
+4.8
+51.9
+80.3
+
+142202221
+model | metrics
+
+
+
+
+
+### Other Settings
+
+Ablations for Deformable Conv and Cascade R-CNN:
+
+
+
+
+
+
+
+Ablations for normalization methods, and a few models trained from scratch following [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883).
+(Note: The baseline uses `2fc` head while the others use [`4conv1fc` head](https://arxiv.org/abs/1803.08494))
+
+
+
+
+
+
+A few very large models trained for a long time, for demo purposes. They are trained using multiple machines:
+
+
+
+
+
+
+
+Name
+inference time (s/im)
+train mem (GB)
+box AP
+mask AP
+PQ
+model id
+download
+
+
+ Panoptic FPN R101
+0.098
+11.4
+47.4
+41.3
+46.1
+139797668
+model | metrics
+
+
+ Mask R-CNN X152
+0.234
+15.1
+50.2
+44.0
+
+18131413
+model | metrics
+
+
+ above + test-time aug.
+
+
+51.9
+45.9
+
+
+
+
+
diff --git a/model/vision/grit_src/third_party/CenterNet2/README.md b/model/vision/grit_src/third_party/CenterNet2/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d3e1d5cf533555e19c6326777f792ac82a560a84
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/README.md
@@ -0,0 +1,85 @@
+# Probabilistic two-stage detection
+Two-stage object detectors that use class-agnostic one-stage detectors as the proposal network.
+
+
+
+
+> [**Probabilistic two-stage detection**](http://arxiv.org/abs/2103.07461),
+> Xingyi Zhou, Vladlen Koltun, Philipp Krähenbühl,
+> *arXiv technical report ([arXiv 2103.07461](http://arxiv.org/abs/2103.07461))*
+
+Contact: [zhouxy@cs.utexas.edu](mailto:zhouxy@cs.utexas.edu). Any questions or discussions are welcomed!
+
+## Abstract
+
+We develop a probabilistic interpretation of two-stage object detection. We show that this probabilistic interpretation motivates a number of common empirical training practices. It also suggests changes to two-stage detection pipelines. Specifically, the first stage should infer proper object-vs-background likelihoods, which should then inform the overall score of the detector. A standard region proposal network (RPN) cannot infer this likelihood sufficiently well, but many one-stage detectors can. We show how to build a probabilistic two-stage detector from any state-of-the-art one-stage detector. The resulting detectors are faster and more accurate than both their one- and two-stage precursors. Our detector achieves 56.4 mAP on COCO test-dev with single-scale testing, outperforming all published results. Using a lightweight backbone, our detector achieves 49.2 mAP on COCO at 33 fps on a Titan Xp.
+
+## Summary
+
+- Two-stage CenterNet: First stage estimates object probabilities, second stage conditionally classifies objects.
+
+- Resulting detector is faster and more accurate than both traditional two-stage detectors (fewer proposals required), and one-stage detectors (lighter first stage head).
+
+- Our best model achieves 56.4 mAP on COCO test-dev.
+
+- This repo also includes a detectron2-based CenterNet implementation with better accuracy (42.5 mAP at 70FPS) and a new FPN version of CenterNet (40.2 mAP with Res50_1x).
+
+## Main results
+
+All models are trained with multi-scale training, and tested with a single scale. The FPS is tested on a Titan RTX GPU.
+More models and details can be found in the [MODEL_ZOO](projects/CenterNet2/centernet2_docs/MODEL_ZOO.md).
+
+#### COCO
+
+| Model | COCO val mAP | FPS |
+|-------------------------------------------|---------------|-------|
+| CenterNet-S4_DLA_8x | 42.5 | 71 |
+| CenterNet2_R50_1x | 42.9 | 24 |
+| CenterNet2_X101-DCN_2x | 49.9 | 8 |
+| CenterNet2_R2-101-DCN-BiFPN_4x+4x_1560_ST | 56.1 | 5 |
+| CenterNet2_DLA-BiFPN-P5_24x_ST | 49.2 | 38 |
+
+
+#### LVIS
+
+| Model | val mAP box |
+| ------------------------- | ----------- |
+| CenterNet2_R50_1x | 26.5 |
+| CenterNet2_FedLoss_R50_1x | 28.3 |
+
+
+#### Objects365
+
+| Model | val mAP |
+|-------------------------------------------|----------|
+| CenterNet2_R50_1x | 22.6 |
+
+## Installation
+
+Our project is developed on [detectron2](https://github.com/facebookresearch/detectron2). Please follow the official detectron2 [installation](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). All our code is under `projects/CenterNet2/`. In theory, you should be able to copy-paste `projects/CenterNet2/` to the latest detectron2 release or your own detectron2 repo to run our project. There might be API changes in future detectron2 releases that make it incompatible.
+
+We use the default detectron2 demo script. To run inference on an image folder using our pre-trained model, run
+
+~~~
+python projects/CenterNet2/demo/demo.py --config-file projects/CenterNet2/configs/CenterNet2_R50_1x.yaml --input path/to/image/ --opts MODEL.WEIGHTS models/CenterNet2_R50_1x.pth
+~~~
+
+## Benchmark evaluation and training
+
+Please check detectron2 [GETTING_STARTED.md](https://github.com/facebookresearch/detectron2/blob/master/GETTING_STARTED.md) for running evaluation and training. Our config files are under `projects/CenterNet2/configs` and the pre-trained models are in the [MODEL_ZOO](projects/CenterNet2/centernet2_docs/MODEL_ZOO.md).
+
+
+## License
+
+Our code under `projects/CenterNet2/` is under [Apache 2.0 license](projects/CenterNet2/LICENSE). `projects/CenterNet2/centernet/modeling/backbone/bifpn_fcos.py` are from [AdelaiDet](https://github.com/aim-uofa/AdelaiDet), which follows the original [non-commercial license](https://github.com/aim-uofa/AdelaiDet/blob/master/LICENSE). The code from detectron2 follows the original [Apache 2.0 license](LICENSE).
+
+## Citation
+
+If you find this project useful for your research, please use the following BibTeX entry.
+
+ @inproceedings{zhou2021probablistic,
+ title={Probabilistic two-stage detection},
+ author={Zhou, Xingyi and Koltun, Vladlen and Kr{\"a}henb{\"u}hl, Philipp},
+ booktitle={arXiv preprint arXiv:2103.07461},
+ year={2021}
+ }
diff --git a/model/vision/grit_src/third_party/CenterNet2/README_D2.md b/model/vision/grit_src/third_party/CenterNet2/README_D2.md
new file mode 100644
index 0000000000000000000000000000000000000000..a88ad7e21ce1d8651ec0d73848ce6dcd17f19d00
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/README_D2.md
@@ -0,0 +1,62 @@
+
+
+Detectron2 is Facebook AI Research's next generation software system
+that implements state-of-the-art object detection algorithms.
+It is a ground-up rewrite of the previous version,
+[Detectron](https://github.com/facebookresearch/Detectron/),
+and it originates from [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/).
+
+
+
+
+
+### What's New
+* It is powered by the [PyTorch](https://pytorch.org) deep learning framework.
+* Includes more features such as panoptic segmentation, Densepose, Cascade R-CNN, rotated bounding boxes, PointRend,
+ DeepLab, etc.
+* Can be used as a library to support [different projects](projects/) on top of it.
+ We'll open source more research projects in this way.
+* It [trains much faster](https://detectron2.readthedocs.io/notes/benchmarks.html).
+* Models can be exported to TorchScript format or Caffe2 format for deployment.
+
+See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-/)
+to see more demos and learn about detectron2.
+
+## Installation
+
+See [INSTALL.md](INSTALL.md).
+
+## Getting Started
+
+Follow the [installation instructions](https://detectron2.readthedocs.io/tutorials/install.html) to
+install detectron2.
+
+See [Getting Started with Detectron2](https://detectron2.readthedocs.io/tutorials/getting_started.html),
+and the [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
+to learn about basic usage.
+
+Learn more at our [documentation](https://detectron2.readthedocs.org).
+And see [projects/](projects/) for some projects that are built on top of detectron2.
+
+## Model Zoo and Baselines
+
+We provide a large set of baseline results and trained models available for download in the [Detectron2 Model Zoo](MODEL_ZOO.md).
+
+
+## License
+
+Detectron2 is released under the [Apache 2.0 license](LICENSE).
+
+## Citing Detectron2
+
+If you use Detectron2 in your research or wish to refer to the baseline results published in the [Model Zoo](MODEL_ZOO.md), please use the following BibTeX entry.
+
+```BibTeX
+@misc{wu2019detectron2,
+ author = {Yuxin Wu and Alexander Kirillov and Francisco Massa and
+ Wan-Yen Lo and Ross Girshick},
+ title = {Detectron2},
+ howpublished = {\url{https://github.com/facebookresearch/detectron2}},
+ year = {2019}
+}
+```
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-C4.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-C4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fbf34a0ea57a587e09997edd94c4012d69d0b6ad
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-C4.yaml
@@ -0,0 +1,18 @@
+MODEL:
+ META_ARCHITECTURE: "GeneralizedRCNN"
+ RPN:
+ PRE_NMS_TOPK_TEST: 6000
+ POST_NMS_TOPK_TEST: 1000
+ ROI_HEADS:
+ NAME: "Res5ROIHeads"
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ TEST: ("coco_2017_val",)
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.02
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-DilatedC5.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-DilatedC5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c0d6d16bdaf532f09e4976f0aa240a49e748da27
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-DilatedC5.yaml
@@ -0,0 +1,31 @@
+MODEL:
+ META_ARCHITECTURE: "GeneralizedRCNN"
+ RESNETS:
+ OUT_FEATURES: ["res5"]
+ RES5_DILATION: 2
+ RPN:
+ IN_FEATURES: ["res5"]
+ PRE_NMS_TOPK_TEST: 6000
+ POST_NMS_TOPK_TEST: 1000
+ ROI_HEADS:
+ NAME: "StandardROIHeads"
+ IN_FEATURES: ["res5"]
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_FC: 2
+ POOLER_RESOLUTION: 7
+ ROI_MASK_HEAD:
+ NAME: "MaskRCNNConvUpsampleHead"
+ NUM_CONV: 4
+ POOLER_RESOLUTION: 14
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ TEST: ("coco_2017_val",)
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.02
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-FPN.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-FPN.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3e020f2e7b2f26765be317f907126a1556621abf
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Base-RCNN-FPN.yaml
@@ -0,0 +1,42 @@
+MODEL:
+ META_ARCHITECTURE: "GeneralizedRCNN"
+ BACKBONE:
+ NAME: "build_resnet_fpn_backbone"
+ RESNETS:
+ OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+ FPN:
+ IN_FEATURES: ["res2", "res3", "res4", "res5"]
+ ANCHOR_GENERATOR:
+ SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map
+ ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps)
+ RPN:
+ IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+ PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level
+ PRE_NMS_TOPK_TEST: 1000 # Per FPN level
+ # Detectron1 uses 2000 proposals per-batch,
+ # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+ # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+ POST_NMS_TOPK_TRAIN: 1000
+ POST_NMS_TOPK_TEST: 1000
+ ROI_HEADS:
+ NAME: "StandardROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_FC: 2
+ POOLER_RESOLUTION: 7
+ ROI_MASK_HEAD:
+ NAME: "MaskRCNNConvUpsampleHead"
+ NUM_CONV: 4
+ POOLER_RESOLUTION: 14
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ TEST: ("coco_2017_val",)
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.02
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Base-RetinaNet.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Base-RetinaNet.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8b45b982bbf84b34d2a6a172ab0a946b1029f7c8
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Base-RetinaNet.yaml
@@ -0,0 +1,25 @@
+MODEL:
+ META_ARCHITECTURE: "RetinaNet"
+ BACKBONE:
+ NAME: "build_retinanet_resnet_fpn_backbone"
+ RESNETS:
+ OUT_FEATURES: ["res3", "res4", "res5"]
+ ANCHOR_GENERATOR:
+ SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"]
+ FPN:
+ IN_FEATURES: ["res3", "res4", "res5"]
+ RETINANET:
+ IOU_THRESHOLDS: [0.4, 0.5]
+ IOU_LABELS: [0, -1, 1]
+ SMOOTH_L1_LOSS_BETA: 0.0
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ TEST: ("coco_2017_val",)
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..773ac10e87c626760d00d831bf664ce9ff073c49
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml
@@ -0,0 +1,17 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ LOAD_PROPOSALS: True
+ RESNETS:
+ DEPTH: 50
+ PROPOSAL_GENERATOR:
+ NAME: "PrecomputedProposals"
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_train_box_proposals_21bc3a.pkl", )
+ TEST: ("coco_2017_val",)
+ PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", )
+DATALOADER:
+ # proposals are part of the dataset_dicts, and take a lot of RAM
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..db142cd671c1841b4f64cf130bee7f7954ecdd28
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bceb6b343618d8cd9a6c414ff9eb86ab31cc230a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-DilatedC5.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..57a098f53ee8c54ecfa354cc96efefd890dc1b72
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f96130105c3ba6ab393e0932870903875f5cb732
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml
@@ -0,0 +1,6 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bc51bce390a85ee3529ffdcebde05748e1646be0
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0fe96f57febdac5790ea4cec168fa4b97ac4807a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml
@@ -0,0 +1,6 @@
+_BASE_: "../Base-RCNN-DilatedC5.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..33fadeb87d1ef67ab2b55926b9a652ab4ac4a27d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-DilatedC5.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3262019a1211b910d3b371569199ed1afaacf6a4
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml
@@ -0,0 +1,6 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..41395182bf5c9dd8ab1241c4414068817298d554
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9c9b5ab77157baa581d90d9847c045c19ed6ffa3
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml
@@ -0,0 +1,13 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ MASK_ON: False
+ WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl"
+ PIXEL_STD: [57.375, 57.120, 58.395]
+ RESNETS:
+ STRIDE_IN_1X1: False # this is a C2 model
+ NUM_GROUPS: 32
+ WIDTH_PER_GROUP: 8
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/fcos_R_50_FPN_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/fcos_R_50_FPN_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..86f83c68786f5995c462ade5f3067072d69f047e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/fcos_R_50_FPN_1x.py
@@ -0,0 +1,11 @@
+from ..common.optim import SGD as optimizer
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.data.coco import dataloader
+from ..common.models.fcos import model
+from ..common.train import train
+
+dataloader.train.mapper.use_instance_mask = False
+optimizer.lr = 0.01
+
+model.backbone.bottom_up.freeze_at = 2
+train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4abb1b9a547957aa6afc0b29129e00f89cf98d59
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml
@@ -0,0 +1,8 @@
+_BASE_: "../Base-RetinaNet.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..43057a8eeed38c78183e26d21b74261eb4dbc1b9
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_1x.py
@@ -0,0 +1,11 @@
+from ..common.optim import SGD as optimizer
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.data.coco import dataloader
+from ..common.models.retinanet import model
+from ..common.train import train
+
+dataloader.train.mapper.use_instance_mask = False
+model.backbone.bottom_up.freeze_at = 2
+optimizer.lr = 0.01
+
+train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4a24ce3a9a108a8792e18c8aabfb7b712f0d3725
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml
@@ -0,0 +1,5 @@
+_BASE_: "../Base-RetinaNet.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b5412d4a7aef1d6c3f7c1e34f94007de639b833
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml
@@ -0,0 +1,8 @@
+_BASE_: "../Base-RetinaNet.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e04821156b0376ba5215d5ce5b7010a36b43e6a1
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml
@@ -0,0 +1,10 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ META_ARCHITECTURE: "ProposalNetwork"
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
+ RPN:
+ PRE_NMS_TOPK_TEST: 12000
+ POST_NMS_TOPK_TEST: 2000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dc9c95203b1c3c9cd9bb9876bb8d9a5dd9b31d9a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ META_ARCHITECTURE: "ProposalNetwork"
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
+ RPN:
+ POST_NMS_TOPK_TEST: 2000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1a94cc45a0f2aaa8c92e14871c553b736545e327
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..67b70cf4be8c19f5dc735b6f55a8690698f34b69
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-DilatedC5.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1935a302d2d0fa7f69553b3fd50b5a7082c6c0d1
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..22016be150df4abbe912700d7ca29f8b7b72554a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.py
@@ -0,0 +1,8 @@
+from ..common.train import train
+from ..common.optim import SGD as optimizer
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.data.coco import dataloader
+from ..common.models.mask_rcnn_c4 import model
+
+model.backbone.freeze_at = 2
+train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a9aeb4eac38026dbb867e799f9fd3a8d8eb3af80
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml
@@ -0,0 +1,6 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..38ed867d897dfec839cbcf11a2e2dc8abb92f07c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b13eefab2a049c48d94d5051c82ceb6dbde40579
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml
@@ -0,0 +1,6 @@
+_BASE_: "../Base-RCNN-DilatedC5.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d401016358f967f6619d88b1c9bd5673a1cdeba8
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-DilatedC5.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..40844ddeb8d47ff58a6af49ab35bad84e14f5721
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py
@@ -0,0 +1,8 @@
+from ..common.optim import SGD as optimizer
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.data.coco import dataloader
+from ..common.models.mask_rcnn_fpn import model
+from ..common.train import train
+
+model.backbone.bottom_up.freeze_at = 2
+train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d50fb866ca7811a87b42555c7213f88e00bf6df1
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
@@ -0,0 +1,6 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x_giou.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x_giou.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bec680ee17a474fefe527b7b79d26266e75c09f0
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x_giou.yaml
@@ -0,0 +1,12 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ RPN:
+ BBOX_REG_LOSS_TYPE: "giou"
+ BBOX_REG_LOSS_WEIGHT: 2.0
+ ROI_BOX_HEAD:
+ BBOX_REG_LOSS_TYPE: "giou"
+ BBOX_REG_LOSS_WEIGHT: 10.0
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..be7d06b8e0f032ee7fcaabd7c122158518489fd2
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d14c63f74383bfc308750f51d51344398b02a239
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml
@@ -0,0 +1,13 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ MASK_ON: True
+ WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl"
+ PIXEL_STD: [57.375, 57.120, 58.395]
+ RESNETS:
+ STRIDE_IN_1X1: False # this is a C2 model
+ NUM_GROUPS: 32
+ WIDTH_PER_GROUP: 8
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7bbdd7d00505f1e51154379c99ab621cb648a6d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py
@@ -0,0 +1,34 @@
+from ..common.optim import SGD as optimizer
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.data.coco import dataloader
+from ..common.models.mask_rcnn_fpn import model
+from ..common.train import train
+
+from detectron2.config import LazyCall as L
+from detectron2.modeling.backbone import RegNet
+from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
+
+
+# Replace default ResNet with RegNetX-4GF from the DDS paper. Config source:
+# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnetx/RegNetX-4.0GF_dds_8gpu.yaml#L4-L9 # noqa
+model.backbone.bottom_up = L(RegNet)(
+ stem_class=SimpleStem,
+ stem_width=32,
+ block_class=ResBottleneckBlock,
+ depth=23,
+ w_a=38.65,
+ w_0=96,
+ w_m=2.43,
+ group_width=40,
+ freeze_at=2,
+ norm="FrozenBN",
+ out_features=["s1", "s2", "s3", "s4"],
+)
+model.pixel_std = [57.375, 57.120, 58.395]
+
+optimizer.weight_decay = 5e-5
+train.init_checkpoint = (
+ "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906383/RegNetX-4.0GF_dds_8gpu.pyth"
+)
+# RegNets benefit from enabling cudnn benchmark mode
+train.cudnn_benchmark = True
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..72c6b7a5c8939970bd0e1e4a3c1155695943b19a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py
@@ -0,0 +1,35 @@
+from ..common.optim import SGD as optimizer
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.data.coco import dataloader
+from ..common.models.mask_rcnn_fpn import model
+from ..common.train import train
+
+from detectron2.config import LazyCall as L
+from detectron2.modeling.backbone import RegNet
+from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
+
+
+# Replace default ResNet with RegNetY-4GF from the DDS paper. Config source:
+# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml#L4-L10 # noqa
+model.backbone.bottom_up = L(RegNet)(
+ stem_class=SimpleStem,
+ stem_width=32,
+ block_class=ResBottleneckBlock,
+ depth=22,
+ w_a=31.41,
+ w_0=96,
+ w_m=2.24,
+ group_width=64,
+ se_ratio=0.25,
+ freeze_at=2,
+ norm="FrozenBN",
+ out_features=["s1", "s2", "s3", "s4"],
+)
+model.pixel_std = [57.375, 57.120, 58.395]
+
+optimizer.weight_decay = 5e-5
+train.init_checkpoint = (
+ "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth"
+)
+# RegNets benefit from enabling cudnn benchmark mode
+train.cudnn_benchmark = True
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e03944a42d2e497da5ceca17c8fda797dac3f82
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml
@@ -0,0 +1,15 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ KEYPOINT_ON: True
+ ROI_HEADS:
+ NUM_CLASSES: 1
+ ROI_BOX_HEAD:
+ SMOOTH_L1_BETA: 0.5 # Keypoint AP degrades (though box AP improves) when using plain L1 loss
+ RPN:
+ # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
+ # 1000 proposals per-image is found to hurt box AP.
+ # Therefore we increase it to 1500 per-image.
+ POST_NMS_TOPK_TRAIN: 1500
+DATASETS:
+ TRAIN: ("keypoints_coco_2017_train",)
+ TEST: ("keypoints_coco_2017_val",)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9309535c57a1aa7d23297aac80a9bd78a6c79fcc
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml
@@ -0,0 +1,8 @@
+_BASE_: "Base-Keypoint-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..1aad53bfef62fb584d5022585d567e346f671a55
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py
@@ -0,0 +1,8 @@
+from ..common.optim import SGD as optimizer
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.data.coco_keypoint import dataloader
+from ..common.models.keypoint_rcnn_fpn import model
+from ..common.train import train
+
+model.backbone.bottom_up.freeze_at = 2
+train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7bf85cf745b53b3e7ab28fe94b7f4f9e7fe6e335
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml
@@ -0,0 +1,5 @@
+_BASE_: "Base-Keypoint-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a07f243f650a497b9372501e3face75194cf0941
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml
@@ -0,0 +1,8 @@
+_BASE_: "Base-Keypoint-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d4bfa20a98c0a65c6bd60e93b07e8f4b7d92a867
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-Keypoint-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl"
+ PIXEL_STD: [57.375, 57.120, 58.395]
+ RESNETS:
+ STRIDE_IN_1X1: False # this is a C2 model
+ NUM_GROUPS: 32
+ WIDTH_PER_GROUP: 8
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f00d54b760c2b9271c75643e0a1ab1ffc0d9543a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml
@@ -0,0 +1,11 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ META_ARCHITECTURE: "PanopticFPN"
+ MASK_ON: True
+ SEM_SEG_HEAD:
+ LOSS_WEIGHT: 0.5
+DATASETS:
+ TRAIN: ("coco_2017_train_panoptic_separated",)
+ TEST: ("coco_2017_val_panoptic_separated",)
+DATALOADER:
+ FILTER_EMPTY_ANNOTATIONS: False
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e01f6fb31e9b00b1857b7de3b5074184d1f4a21
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml
@@ -0,0 +1,8 @@
+_BASE_: "Base-Panoptic-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..40cf18131810307157a9a7d1f6d5922b00fd73d5
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.py
@@ -0,0 +1,8 @@
+from ..common.optim import SGD as optimizer
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.data.coco_panoptic_separated import dataloader
+from ..common.models.panoptic_fpn import model
+from ..common.train import train
+
+model.backbone.bottom_up.freeze_at = 2
+train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6afa2c1cc92495309ed1553a17359fe5d7d6566e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml
@@ -0,0 +1,5 @@
+_BASE_: "Base-Panoptic-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b956b3f673e78649184fe2c50e2700b3f1f14794
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml
@@ -0,0 +1,8 @@
+_BASE_: "Base-Panoptic-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1a7aaeb961581ed9492c4cfe5a69a1eb60495b3e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml
@@ -0,0 +1,27 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ # WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ # For better, more stable performance initialize from COCO
+ WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
+ MASK_ON: True
+ ROI_HEADS:
+ NUM_CLASSES: 8
+# This is similar to the setting used in Mask R-CNN paper, Appendix A
+# But there are some differences, e.g., we did not initialize the output
+# layer using the corresponding classes from COCO
+INPUT:
+ MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024)
+ MIN_SIZE_TRAIN_SAMPLING: "choice"
+ MIN_SIZE_TEST: 1024
+ MAX_SIZE_TRAIN: 2048
+ MAX_SIZE_TEST: 2048
+DATASETS:
+ TRAIN: ("cityscapes_fine_instance_seg_train",)
+ TEST: ("cityscapes_fine_instance_seg_val",)
+SOLVER:
+ BASE_LR: 0.01
+ STEPS: (18000,)
+ MAX_ITER: 24000
+ IMS_PER_BATCH: 8
+TEST:
+ EVAL_PERIOD: 8000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/README.md b/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..924fd00af642ddf1a4ff4c4f5947f676134eb7de
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/README.md
@@ -0,0 +1,84 @@
+
+Detectron2 model zoo's experimental settings and a few implementation details are different from Detectron.
+
+The differences in implementation details are shared in
+[Compatibility with Other Libraries](../../docs/notes/compatibility.md).
+
+The differences in model zoo's experimental settings include:
+* Use scale augmentation during training. This improves AP with lower training cost.
+* Use L1 loss instead of smooth L1 loss for simplicity. This sometimes improves box AP but may
+ affect other AP.
+* Use `POOLER_SAMPLING_RATIO=0` instead of 2. This does not significantly affect AP.
+* Use `ROIAlignV2`. This does not significantly affect AP.
+
+In this directory, we provide a few configs that __do not__ have the above changes.
+They mimic Detectron's behavior as close as possible,
+and provide a fair comparison of accuracy and speed against Detectron.
+
+
+
+
+
+
+
+Name
+lr sched
+train time (s/iter)
+inference time (s/im)
+train mem (GB)
+box AP
+mask AP
+kp. AP
+model id
+download
+
+
+ Faster R-CNN
+1x
+0.219
+0.038
+3.1
+36.9
+
+
+137781054
+model | metrics
+
+
+ Keypoint R-CNN
+1x
+0.313
+0.071
+5.0
+53.1
+
+64.2
+137781195
+model | metrics
+
+
+ Mask R-CNN
+1x
+0.273
+0.043
+3.4
+37.8
+34.9
+
+137781281
+model | metrics
+
+
+
+## Comparisons:
+
+* Faster R-CNN: Detectron's AP is 36.7, similar to ours.
+* Keypoint R-CNN: Detectron's AP is box 53.6, keypoint 64.2. Fixing a Detectron's
+ [bug](https://github.com/facebookresearch/Detectron/issues/459) lead to a drop in box AP, and can be
+ compensated back by some parameter tuning.
+* Mask R-CNN: Detectron's AP is box 37.7, mask 33.9. We're 1 AP better in mask AP, due to more correct implementation.
+ See [this article](https://ppwwyyxx.com/blog/2021/Where-are-Pixels/) for details.
+
+For speed comparison, see [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html).
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6ce77f137fa2c4e5254a62b58c18b8b76096f2aa
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml
@@ -0,0 +1,17 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
+ # Detectron1 uses smooth L1 loss with some magic beta values.
+ # The defaults are changed to L1 loss in Detectron2.
+ RPN:
+ SMOOTH_L1_BETA: 0.1111
+ ROI_BOX_HEAD:
+ SMOOTH_L1_BETA: 1.0
+ POOLER_SAMPLING_RATIO: 2
+ POOLER_TYPE: "ROIAlign"
+INPUT:
+ # no scale augmentation
+ MIN_SIZE_TRAIN: (800, )
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aacf868ba5290c752031c130a2081af48afc0808
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml
@@ -0,0 +1,27 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ KEYPOINT_ON: True
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 1
+ ROI_KEYPOINT_HEAD:
+ POOLER_RESOLUTION: 14
+ POOLER_SAMPLING_RATIO: 2
+ POOLER_TYPE: "ROIAlign"
+ # Detectron1 uses smooth L1 loss with some magic beta values.
+ # The defaults are changed to L1 loss in Detectron2.
+ ROI_BOX_HEAD:
+ SMOOTH_L1_BETA: 1.0
+ POOLER_SAMPLING_RATIO: 2
+ POOLER_TYPE: "ROIAlign"
+ RPN:
+ SMOOTH_L1_BETA: 0.1111
+ # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2
+ # 1000 proposals per-image is found to hurt box AP.
+ # Therefore we increase it to 1500 per-image.
+ POST_NMS_TOPK_TRAIN: 1500
+DATASETS:
+ TRAIN: ("keypoints_coco_2017_train",)
+ TEST: ("keypoints_coco_2017_val",)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ea86a8d8e2cd3e51cbc7311b0d00710c07d01f6
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml
@@ -0,0 +1,20 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ # Detectron1 uses smooth L1 loss with some magic beta values.
+ # The defaults are changed to L1 loss in Detectron2.
+ RPN:
+ SMOOTH_L1_BETA: 0.1111
+ ROI_BOX_HEAD:
+ SMOOTH_L1_BETA: 1.0
+ POOLER_SAMPLING_RATIO: 2
+ POOLER_TYPE: "ROIAlign"
+ ROI_MASK_HEAD:
+ POOLER_SAMPLING_RATIO: 2
+ POOLER_TYPE: "ROIAlign"
+INPUT:
+ # no scale augmentation
+ MIN_SIZE_TRAIN: (800, )
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f0c3a1bbc0a09e1384de522f30c443ba1e36fafa
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml
@@ -0,0 +1,19 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 101
+ ROI_HEADS:
+ NUM_CLASSES: 1230
+ SCORE_THRESH_TEST: 0.0001
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+DATASETS:
+ TRAIN: ("lvis_v0.5_train",)
+ TEST: ("lvis_v0.5_val",)
+TEST:
+ DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300
+DATALOADER:
+ SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
+ REPEAT_THRESHOLD: 0.001
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..64b4caa4ef2b284782367ea702e1ae6653472630
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
@@ -0,0 +1,19 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 1230
+ SCORE_THRESH_TEST: 0.0001
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+DATASETS:
+ TRAIN: ("lvis_v0.5_train",)
+ TEST: ("lvis_v0.5_val",)
+TEST:
+ DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300
+DATALOADER:
+ SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
+ REPEAT_THRESHOLD: 0.001
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c8b822c6c006ba642f4caf9b55e7983f6797427a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml
@@ -0,0 +1,23 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl"
+ PIXEL_STD: [57.375, 57.120, 58.395]
+ MASK_ON: True
+ RESNETS:
+ STRIDE_IN_1X1: False # this is a C2 model
+ NUM_GROUPS: 32
+ WIDTH_PER_GROUP: 8
+ DEPTH: 101
+ ROI_HEADS:
+ NUM_CLASSES: 1230
+ SCORE_THRESH_TEST: 0.0001
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+DATASETS:
+ TRAIN: ("lvis_v0.5_train",)
+ TEST: ("lvis_v0.5_val",)
+TEST:
+ DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300
+DATALOADER:
+ SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
+ REPEAT_THRESHOLD: 0.001
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ca4dd97144561276ecaabbb6c254e3a7737ac157
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml
@@ -0,0 +1,22 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 101
+ ROI_HEADS:
+ NUM_CLASSES: 1203
+ SCORE_THRESH_TEST: 0.0001
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+DATASETS:
+ TRAIN: ("lvis_v1_train",)
+ TEST: ("lvis_v1_val",)
+TEST:
+ DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs
+DATALOADER:
+ SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
+ REPEAT_THRESHOLD: 0.001
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f313295ee5f0d553d394ce2efe003810c79af47d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
@@ -0,0 +1,22 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 1203
+ SCORE_THRESH_TEST: 0.0001
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+DATASETS:
+ TRAIN: ("lvis_v1_train",)
+ TEST: ("lvis_v1_val",)
+TEST:
+ DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs
+DATALOADER:
+ SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
+ REPEAT_THRESHOLD: 0.001
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6528f7c31c8cfbf139c14fd0cae598592d8e898
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/LVISv1-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml
@@ -0,0 +1,26 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl"
+ PIXEL_STD: [57.375, 57.120, 58.395]
+ MASK_ON: True
+ RESNETS:
+ STRIDE_IN_1X1: False # this is a C2 model
+ NUM_GROUPS: 32
+ WIDTH_PER_GROUP: 8
+ DEPTH: 101
+ ROI_HEADS:
+ NUM_CLASSES: 1203
+ SCORE_THRESH_TEST: 0.0001
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+DATASETS:
+ TRAIN: ("lvis_v1_train",)
+ TEST: ("lvis_v1_val",)
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000 # 180000 * 16 / 100000 ~ 28.8 epochs
+TEST:
+ DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300
+DATALOADER:
+ SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
+ REPEAT_THRESHOLD: 0.001
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..abb33b618932e94b66239945ac892f4c84a6e8f8
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NAME: CascadeROIHeads
+ ROI_BOX_HEAD:
+ CLS_AGNOSTIC_BBOX_REG: True
+ RPN:
+ POST_NMS_TOPK_TRAIN: 2000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2201ad5c46ded91ccfa47b7698a521625c5e447
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml
@@ -0,0 +1,15 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NAME: CascadeROIHeads
+ ROI_BOX_HEAD:
+ CLS_AGNOSTIC_BBOX_REG: True
+ RPN:
+ POST_NMS_TOPK_TRAIN: 2000
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc117f6b5e3e51558ec2f01b73c5365622e5ce25
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml
@@ -0,0 +1,36 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ MASK_ON: True
+ WEIGHTS: "catalog://ImageNetPretrained/FAIR/X-152-32x8d-IN5k"
+ RESNETS:
+ STRIDE_IN_1X1: False # this is a C2 model
+ NUM_GROUPS: 32
+ WIDTH_PER_GROUP: 8
+ DEPTH: 152
+ DEFORM_ON_PER_STAGE: [False, True, True, True]
+ ROI_HEADS:
+ NAME: "CascadeROIHeads"
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_CONV: 4
+ NUM_FC: 1
+ NORM: "GN"
+ CLS_AGNOSTIC_BBOX_REG: True
+ ROI_MASK_HEAD:
+ NUM_CONV: 8
+ NORM: "GN"
+ RPN:
+ POST_NMS_TOPK_TRAIN: 2000
+SOLVER:
+ IMS_PER_BATCH: 128
+ STEPS: (35000, 45000)
+ MAX_ITER: 50000
+ BASE_LR: 0.16
+INPUT:
+ MIN_SIZE_TRAIN: (640, 864)
+ MIN_SIZE_TRAIN_SAMPLING: "range"
+ MAX_SIZE_TRAIN: 1440
+ CROP:
+ ENABLED: True
+TEST:
+ EVAL_PERIOD: 2500
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4c3b767ff473bbab7225cc8a4a92608543d78246
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml
@@ -0,0 +1,10 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ ROI_BOX_HEAD:
+ CLS_AGNOSTIC_BBOX_REG: True
+ ROI_MASK_HEAD:
+ CLS_AGNOSTIC_MASK: True
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..04ff988d073ef9169ee4ca2cbce0d6f030c15232
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml
@@ -0,0 +1,8 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5
+ DEFORM_MODULATED: False
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68c0ca58d7df97ca728c339da0ca9828fe6be318
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml
@@ -0,0 +1,11 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5
+ DEFORM_MODULATED: False
+SOLVER:
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..74d274e5a529b5a8afe186940868f9d48c6112b3
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml
@@ -0,0 +1,21 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-50-GN"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ NORM: "GN"
+ STRIDE_IN_1X1: False
+ FPN:
+ NORM: "GN"
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_CONV: 4
+ NUM_FC: 1
+ NORM: "GN"
+ ROI_MASK_HEAD:
+ NORM: "GN"
+SOLVER:
+ # 3x schedule
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..11ebb076ba529f26c71a0d972e96ca4c2d6a830b
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml
@@ -0,0 +1,24 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ NORM: "SyncBN"
+ STRIDE_IN_1X1: True
+ FPN:
+ NORM: "SyncBN"
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_CONV: 4
+ NUM_FC: 1
+ NORM: "SyncBN"
+ ROI_MASK_HEAD:
+ NORM: "SyncBN"
+SOLVER:
+ # 3x schedule
+ STEPS: (210000, 250000)
+ MAX_ITER: 270000
+TEST:
+ PRECISE_BN:
+ ENABLED: True
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f2464be744c083985898a25f9e71d00104f689d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py
@@ -0,0 +1,151 @@
+# An example config to train a mmdetection model using detectron2.
+
+from ..common.data.coco import dataloader
+from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
+from ..common.optim import SGD as optimizer
+from ..common.train import train
+
+from detectron2.modeling.mmdet_wrapper import MMDetDetector
+from detectron2.config import LazyCall as L
+
+model = L(MMDetDetector)(
+ detector=dict(
+ type="MaskRCNN",
+ pretrained="torchvision://resnet50",
+ backbone=dict(
+ type="ResNet",
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=1,
+ norm_cfg=dict(type="BN", requires_grad=True),
+ norm_eval=True,
+ style="pytorch",
+ ),
+ neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
+ rpn_head=dict(
+ type="RPNHead",
+ in_channels=256,
+ feat_channels=256,
+ anchor_generator=dict(
+ type="AnchorGenerator",
+ scales=[8],
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64],
+ ),
+ bbox_coder=dict(
+ type="DeltaXYWHBBoxCoder",
+ target_means=[0.0, 0.0, 0.0, 0.0],
+ target_stds=[1.0, 1.0, 1.0, 1.0],
+ ),
+ loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
+ loss_bbox=dict(type="L1Loss", loss_weight=1.0),
+ ),
+ roi_head=dict(
+ type="StandardRoIHead",
+ bbox_roi_extractor=dict(
+ type="SingleRoIExtractor",
+ roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32],
+ ),
+ bbox_head=dict(
+ type="Shared2FCBBoxHead",
+ in_channels=256,
+ fc_out_channels=1024,
+ roi_feat_size=7,
+ num_classes=80,
+ bbox_coder=dict(
+ type="DeltaXYWHBBoxCoder",
+ target_means=[0.0, 0.0, 0.0, 0.0],
+ target_stds=[0.1, 0.1, 0.2, 0.2],
+ ),
+ reg_class_agnostic=False,
+ loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
+ loss_bbox=dict(type="L1Loss", loss_weight=1.0),
+ ),
+ mask_roi_extractor=dict(
+ type="SingleRoIExtractor",
+ roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32],
+ ),
+ mask_head=dict(
+ type="FCNMaskHead",
+ num_convs=4,
+ in_channels=256,
+ conv_out_channels=256,
+ num_classes=80,
+ loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
+ ),
+ ),
+ # model training and testing settings
+ train_cfg=dict(
+ rpn=dict(
+ assigner=dict(
+ type="MaxIoUAssigner",
+ pos_iou_thr=0.7,
+ neg_iou_thr=0.3,
+ min_pos_iou=0.3,
+ match_low_quality=True,
+ ignore_iof_thr=-1,
+ ),
+ sampler=dict(
+ type="RandomSampler",
+ num=256,
+ pos_fraction=0.5,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=False,
+ ),
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False,
+ ),
+ rpn_proposal=dict(
+ nms_pre=2000,
+ max_per_img=1000,
+ nms=dict(type="nms", iou_threshold=0.7),
+ min_bbox_size=0,
+ ),
+ rcnn=dict(
+ assigner=dict(
+ type="MaxIoUAssigner",
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0.5,
+ match_low_quality=True,
+ ignore_iof_thr=-1,
+ ),
+ sampler=dict(
+ type="RandomSampler",
+ num=512,
+ pos_fraction=0.25,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ ),
+ mask_size=28,
+ pos_weight=-1,
+ debug=False,
+ ),
+ ),
+ test_cfg=dict(
+ rpn=dict(
+ nms_pre=1000,
+ max_per_img=1000,
+ nms=dict(type="nms", iou_threshold=0.7),
+ min_bbox_size=0,
+ ),
+ rcnn=dict(
+ score_thr=0.05,
+ nms=dict(type="nms", iou_threshold=0.5),
+ max_per_img=100,
+ mask_thr_binary=0.5,
+ ),
+ ),
+ ),
+ pixel_mean=[123.675, 116.280, 103.530],
+ pixel_std=[58.395, 57.120, 57.375],
+)
+
+dataloader.train.mapper.image_format = "RGB" # torchvision pretrained model
+train.init_checkpoint = None # pretrained model is loaded inside backbone
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34016cea3ca9d7fb69ef4fe01d6b47ee8690a13b
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml
@@ -0,0 +1,26 @@
+# A large PanopticFPN for demo purposes.
+# Use GN on backbone to support semantic seg.
+# Use Cascade + Deform Conv to improve localization.
+_BASE_: "../COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml"
+MODEL:
+ WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-101-GN"
+ RESNETS:
+ DEPTH: 101
+ NORM: "GN"
+ DEFORM_ON_PER_STAGE: [False, True, True, True]
+ STRIDE_IN_1X1: False
+ FPN:
+ NORM: "GN"
+ ROI_HEADS:
+ NAME: CascadeROIHeads
+ ROI_BOX_HEAD:
+ CLS_AGNOSTIC_BBOX_REG: True
+ ROI_MASK_HEAD:
+ NORM: "GN"
+ RPN:
+ POST_NMS_TOPK_TRAIN: 2000
+SOLVER:
+ STEPS: (105000, 125000)
+ MAX_ITER: 135000
+ IMS_PER_BATCH: 32
+ BASE_LR: 0.04
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f3400288cde242fcf66eef7f63b5a9165ca663c5
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml
@@ -0,0 +1,13 @@
+_BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml"
+MODEL:
+ # Train from random initialization.
+ WEIGHTS: ""
+ # It makes sense to divide by STD when training from scratch
+ # But it seems to make no difference on the results and C2's models didn't do this.
+ # So we keep things consistent with C2.
+ # PIXEL_STD: [57.375, 57.12, 58.395]
+ MASK_ON: True
+ BACKBONE:
+ FREEZE_AT: 0
+# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883
+# to learn what you need for training from scratch.
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d90c9ff0ef4573252ee165b4c958ec5f74178176
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml
@@ -0,0 +1,19 @@
+_BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml"
+MODEL:
+ PIXEL_STD: [57.375, 57.12, 58.395]
+ WEIGHTS: ""
+ MASK_ON: True
+ RESNETS:
+ STRIDE_IN_1X1: False
+ BACKBONE:
+ FREEZE_AT: 0
+SOLVER:
+ # 9x schedule
+ IMS_PER_BATCH: 64 # 4x the standard
+ STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k
+ MAX_ITER: 202500 # 90k * 9 / 4
+ BASE_LR: 0.08
+TEST:
+ EVAL_PERIOD: 2500
+# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883
+# to learn what you need for training from scratch.
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..60d4e42330e396a1901437df8e17b262d5ad547a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml
@@ -0,0 +1,19 @@
+_BASE_: "mask_rcnn_R_50_FPN_3x_syncbn.yaml"
+MODEL:
+ PIXEL_STD: [57.375, 57.12, 58.395]
+ WEIGHTS: ""
+ MASK_ON: True
+ RESNETS:
+ STRIDE_IN_1X1: False
+ BACKBONE:
+ FREEZE_AT: 0
+SOLVER:
+ # 9x schedule
+ IMS_PER_BATCH: 64 # 4x the standard
+ STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k
+ MAX_ITER: 202500 # 90k * 9 / 4
+ BASE_LR: 0.08
+TEST:
+ EVAL_PERIOD: 2500
+# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883
+# to learn what you need for training from scratch.
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/semantic_R_50_FPN_1x.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/semantic_R_50_FPN_1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ac256e1372770ab3d9ae522c962de0fd0dbceeb5
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/semantic_R_50_FPN_1x.yaml
@@ -0,0 +1,11 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ META_ARCHITECTURE: "SemanticSegmentor"
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+DATASETS:
+ TRAIN: ("coco_2017_train_panoptic_stuffonly",)
+ TEST: ("coco_2017_val_panoptic_stuffonly",)
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/Misc/torchvision_imagenet_R_50.py b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/torchvision_imagenet_R_50.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d75305bcf7445b98db84b3d489a1505d2fce5af
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/Misc/torchvision_imagenet_R_50.py
@@ -0,0 +1,150 @@
+"""
+An example config file to train a ImageNet classifier with detectron2.
+Model and dataloader both come from torchvision.
+This shows how to use detectron2 as a general engine for any new models and tasks.
+
+To run, use the following command:
+
+python tools/lazyconfig_train_net.py --config-file configs/Misc/torchvision_imagenet_R_50.py \
+ --num-gpus 8 dataloader.train.dataset.root=/path/to/imagenet/
+
+"""
+
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from omegaconf import OmegaConf
+import torchvision
+from torchvision.transforms import transforms as T
+from torchvision.models.resnet import ResNet, Bottleneck
+from fvcore.common.param_scheduler import MultiStepParamScheduler
+
+from detectron2.solver import WarmupParamScheduler
+from detectron2.solver.build import get_default_optimizer_params
+from detectron2.config import LazyCall as L
+from detectron2.model_zoo import get_config
+from detectron2.data.samplers import TrainingSampler, InferenceSampler
+from detectron2.evaluation import DatasetEvaluator
+from detectron2.utils import comm
+
+
+"""
+Note: Here we put reusable code (models, evaluation, data) together with configs just as a
+proof-of-concept, to easily demonstrate what's needed to train a ImageNet classifier in detectron2.
+Writing code in configs offers extreme flexibility but is often not a good engineering practice.
+In practice, you might want to put code in your project and import them instead.
+"""
+
+
+def build_data_loader(dataset, batch_size, num_workers, training=True):
+ return torch.utils.data.DataLoader(
+ dataset,
+ sampler=(TrainingSampler if training else InferenceSampler)(len(dataset)),
+ batch_size=batch_size,
+ num_workers=num_workers,
+ pin_memory=True,
+ )
+
+
+class ClassificationNet(nn.Module):
+ def __init__(self, model: nn.Module):
+ super().__init__()
+ self.model = model
+
+ @property
+ def device(self):
+ return list(self.model.parameters())[0].device
+
+ def forward(self, inputs):
+ image, label = inputs
+ pred = self.model(image.to(self.device))
+ if self.training:
+ label = label.to(self.device)
+ return F.cross_entropy(pred, label)
+ else:
+ return pred
+
+
+class ClassificationAcc(DatasetEvaluator):
+ def reset(self):
+ self.corr = self.total = 0
+
+ def process(self, inputs, outputs):
+ image, label = inputs
+ self.corr += (outputs.argmax(dim=1).cpu() == label.cpu()).sum().item()
+ self.total += len(label)
+
+ def evaluate(self):
+ all_corr_total = comm.all_gather([self.corr, self.total])
+ corr = sum(x[0] for x in all_corr_total)
+ total = sum(x[1] for x in all_corr_total)
+ return {"accuracy": corr / total}
+
+
+# --- End of code that could be in a project and be imported
+
+
+dataloader = OmegaConf.create()
+dataloader.train = L(build_data_loader)(
+ dataset=L(torchvision.datasets.ImageNet)(
+ root="/path/to/imagenet",
+ split="train",
+ transform=L(T.Compose)(
+ transforms=[
+ L(T.RandomResizedCrop)(size=224),
+ L(T.RandomHorizontalFlip)(),
+ T.ToTensor(),
+ L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+ ]
+ ),
+ ),
+ batch_size=256 // 8,
+ num_workers=4,
+ training=True,
+)
+
+dataloader.test = L(build_data_loader)(
+ dataset=L(torchvision.datasets.ImageNet)(
+ root="${...train.dataset.root}",
+ split="val",
+ transform=L(T.Compose)(
+ transforms=[
+ L(T.Resize)(size=256),
+ L(T.CenterCrop)(size=224),
+ T.ToTensor(),
+ L(T.Normalize)(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+ ]
+ ),
+ ),
+ batch_size=256 // 8,
+ num_workers=4,
+ training=False,
+)
+
+dataloader.evaluator = L(ClassificationAcc)()
+
+model = L(ClassificationNet)(
+ model=(ResNet)(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=True)
+)
+
+
+optimizer = L(torch.optim.SGD)(
+ params=L(get_default_optimizer_params)(),
+ lr=0.1,
+ momentum=0.9,
+ weight_decay=1e-4,
+)
+
+lr_multiplier = L(WarmupParamScheduler)(
+ scheduler=L(MultiStepParamScheduler)(
+ values=[1.0, 0.1, 0.01, 0.001], milestones=[30, 60, 90, 100]
+ ),
+ warmup_length=1 / 100,
+ warmup_factor=0.1,
+)
+
+
+train = get_config("common/train.py").train
+train.init_checkpoint = None
+train.max_iter = 100 * 1281167 // 256
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea2a6baaebd1a186db18f2904430ffb25901898e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml
@@ -0,0 +1,18 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 20
+INPUT:
+ MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
+ MIN_SIZE_TEST: 800
+DATASETS:
+ TRAIN: ('voc_2007_trainval', 'voc_2012_trainval')
+ TEST: ('voc_2007_test',)
+SOLVER:
+ STEPS: (12000, 16000)
+ MAX_ITER: 18000 # 17.4 epochs
+ WARMUP_ITERS: 100
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e554cab18a358a27b630c1ab0c2359666b0e1514
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml
@@ -0,0 +1,18 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: False
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 20
+INPUT:
+ MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
+ MIN_SIZE_TEST: 800
+DATASETS:
+ TRAIN: ('voc_2007_trainval', 'voc_2012_trainval')
+ TEST: ('voc_2007_test',)
+SOLVER:
+ STEPS: (12000, 16000)
+ MAX_ITER: 18000 # 17.4 epochs
+ WARMUP_ITERS: 100
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/README.md b/model/vision/grit_src/third_party/CenterNet2/configs/common/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..912cc29927542bfe4258d3208cf52d73cb0ea477
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/README.md
@@ -0,0 +1,6 @@
+This directory provides definitions for a few common models, dataloaders, scheduler,
+and optimizers that are often used in training.
+The definition of these objects are provided in the form of lazy instantiation:
+their arguments can be edited by users before constructing the objects.
+
+They can be imported, or loaded by `model_zoo.get_config` API in users' own configs.
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py
new file mode 100644
index 0000000000000000000000000000000000000000..355e66a1d213cb599a7ffe55089d854089c8ead2
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py
@@ -0,0 +1,47 @@
+from fvcore.common.param_scheduler import MultiStepParamScheduler
+
+from detectron2.config import LazyCall as L
+from detectron2.solver import WarmupParamScheduler
+
+
+def default_X_scheduler(num_X):
+ """
+ Returns the config for a default multi-step LR scheduler such as "1x", "3x",
+ commonly referred to in papers, where every 1x has the total length of 1440k
+ training images (~12 COCO epochs). LR is decayed twice at the end of training
+ following the strategy defined in "Rethinking ImageNet Pretraining", Sec 4.
+
+ Args:
+ num_X: a positive real number
+
+ Returns:
+ DictConfig: configs that define the multiplier for LR during training
+ """
+ # total number of iterations assuming 16 batch size, using 1440000/16=90000
+ total_steps_16bs = num_X * 90000
+
+ if num_X <= 2:
+ scheduler = L(MultiStepParamScheduler)(
+ values=[1.0, 0.1, 0.01],
+ # note that scheduler is scale-invariant. This is equivalent to
+ # milestones=[6, 8, 9]
+ milestones=[60000, 80000, 90000],
+ )
+ else:
+ scheduler = L(MultiStepParamScheduler)(
+ values=[1.0, 0.1, 0.01],
+ milestones=[total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs],
+ )
+ return L(WarmupParamScheduler)(
+ scheduler=scheduler,
+ warmup_length=1000 / total_steps_16bs,
+ warmup_method="linear",
+ warmup_factor=0.001,
+ )
+
+
+lr_multiplier_1x = default_X_scheduler(1)
+lr_multiplier_2x = default_X_scheduler(2)
+lr_multiplier_3x = default_X_scheduler(3)
+lr_multiplier_6x = default_X_scheduler(6)
+lr_multiplier_9x = default_X_scheduler(9)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..703c4385c7ddc7eb0759c98d102ab2384d6a9e3e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco.py
@@ -0,0 +1,48 @@
+from omegaconf import OmegaConf
+
+import detectron2.data.transforms as T
+from detectron2.config import LazyCall as L
+from detectron2.data import (
+ DatasetMapper,
+ build_detection_test_loader,
+ build_detection_train_loader,
+ get_detection_dataset_dicts,
+)
+from detectron2.evaluation import COCOEvaluator
+
+dataloader = OmegaConf.create()
+
+dataloader.train = L(build_detection_train_loader)(
+ dataset=L(get_detection_dataset_dicts)(names="coco_2017_train"),
+ mapper=L(DatasetMapper)(
+ is_train=True,
+ augmentations=[
+ L(T.ResizeShortestEdge)(
+ short_edge_length=(640, 672, 704, 736, 768, 800),
+ sample_style="choice",
+ max_size=1333,
+ ),
+ L(T.RandomFlip)(horizontal=True),
+ ],
+ image_format="BGR",
+ use_instance_mask=True,
+ ),
+ total_batch_size=16,
+ num_workers=4,
+)
+
+dataloader.test = L(build_detection_test_loader)(
+ dataset=L(get_detection_dataset_dicts)(names="coco_2017_val", filter_empty=False),
+ mapper=L(DatasetMapper)(
+ is_train=False,
+ augmentations=[
+ L(T.ResizeShortestEdge)(short_edge_length=800, max_size=1333),
+ ],
+ image_format="${...train.mapper.image_format}",
+ ),
+ num_workers=4,
+)
+
+dataloader.evaluator = L(COCOEvaluator)(
+ dataset_name="${..test.dataset.names}",
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4ceb066faf696954244205dc75376b767071217
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py
@@ -0,0 +1,13 @@
+from detectron2.data.detection_utils import create_keypoint_hflip_indices
+
+from .coco import dataloader
+
+dataloader.train.dataset.min_keypoints = 1
+dataloader.train.dataset.names = "keypoints_coco_2017_train"
+dataloader.test.dataset.names = "keypoints_coco_2017_val"
+
+dataloader.train.mapper.update(
+ use_instance_mask=False,
+ use_keypoint=True,
+ keypoint_hflip_indices=create_keypoint_hflip_indices(dataloader.train.dataset.names),
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco_panoptic_separated.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco_panoptic_separated.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ccbc77e64d1c92c99cbd7158d047bab54cb9f3d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/data/coco_panoptic_separated.py
@@ -0,0 +1,26 @@
+from detectron2.config import LazyCall as L
+from detectron2.evaluation import (
+ COCOEvaluator,
+ COCOPanopticEvaluator,
+ DatasetEvaluators,
+ SemSegEvaluator,
+)
+
+from .coco import dataloader
+
+dataloader.train.dataset.names = "coco_2017_train_panoptic_separated"
+dataloader.train.dataset.filter_empty = False
+dataloader.test.dataset.names = "coco_2017_val_panoptic_separated"
+
+
+dataloader.evaluator = [
+ L(COCOEvaluator)(
+ dataset_name="${...test.dataset.names}",
+ ),
+ L(SemSegEvaluator)(
+ dataset_name="${...test.dataset.names}",
+ ),
+ L(COCOPanopticEvaluator)(
+ dataset_name="${...test.dataset.names}",
+ ),
+]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/models/cascade_rcnn.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/cascade_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7372a801dc00d7fec4db8cda8c2612ce281d48a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/cascade_rcnn.py
@@ -0,0 +1,36 @@
+from detectron2.config import LazyCall as L
+from detectron2.layers import ShapeSpec
+from detectron2.modeling.box_regression import Box2BoxTransform
+from detectron2.modeling.matcher import Matcher
+from detectron2.modeling.roi_heads import FastRCNNOutputLayers, FastRCNNConvFCHead, CascadeROIHeads
+
+from .mask_rcnn_fpn import model
+
+# arguments that don't exist for Cascade R-CNN
+[model.roi_heads.pop(k) for k in ["box_head", "box_predictor", "proposal_matcher"]]
+
+model.roi_heads.update(
+ _target_=CascadeROIHeads,
+ box_heads=[
+ L(FastRCNNConvFCHead)(
+ input_shape=ShapeSpec(channels=256, height=7, width=7),
+ conv_dims=[],
+ fc_dims=[1024, 1024],
+ )
+ for k in range(3)
+ ],
+ box_predictors=[
+ L(FastRCNNOutputLayers)(
+ input_shape=ShapeSpec(channels=1024),
+ test_score_thresh=0.05,
+ box2box_transform=L(Box2BoxTransform)(weights=(w1, w1, w2, w2)),
+ cls_agnostic_bbox_reg=True,
+ num_classes="${...num_classes}",
+ )
+ for (w1, w2) in [(10, 5), (20, 10), (30, 15)]
+ ],
+ proposal_matchers=[
+ L(Matcher)(thresholds=[th], labels=[0, 1], allow_low_quality_matches=False)
+ for th in [0.5, 0.6, 0.7]
+ ],
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/models/fcos.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/fcos.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c752029b7fc64ec375a55182e5342c9eb48bb33
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/fcos.py
@@ -0,0 +1,23 @@
+from detectron2.modeling.meta_arch.fcos import FCOS, FCOSHead
+
+from .retinanet import model
+
+model._target_ = FCOS
+
+del model.anchor_generator
+del model.box2box_transform
+del model.anchor_matcher
+del model.input_format
+
+# Use P5 instead of C5 to compute P6/P7
+# (Sec 2.2 of https://arxiv.org/abs/2006.09214)
+model.backbone.top_block.in_feature = "p5"
+model.backbone.top_block.in_channels = 256
+
+# New score threshold determined based on sqrt(cls_score * centerness)
+model.test_score_thresh = 0.2
+model.test_nms_thresh = 0.6
+
+model.head._target_ = FCOSHead
+del model.head.num_anchors
+model.head.norm = "GN"
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/models/keypoint_rcnn_fpn.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/keypoint_rcnn_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..56b3994df249884d4816fc9a5c7f553a9ab6f400
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/keypoint_rcnn_fpn.py
@@ -0,0 +1,33 @@
+from detectron2.config import LazyCall as L
+from detectron2.layers import ShapeSpec
+from detectron2.modeling.poolers import ROIPooler
+from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
+
+from .mask_rcnn_fpn import model
+
+[model.roi_heads.pop(x) for x in ["mask_in_features", "mask_pooler", "mask_head"]]
+
+model.roi_heads.update(
+ num_classes=1,
+ keypoint_in_features=["p2", "p3", "p4", "p5"],
+ keypoint_pooler=L(ROIPooler)(
+ output_size=14,
+ scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
+ sampling_ratio=0,
+ pooler_type="ROIAlignV2",
+ ),
+ keypoint_head=L(KRCNNConvDeconvUpsampleHead)(
+ input_shape=ShapeSpec(channels=256, width=14, height=14),
+ num_keypoints=17,
+ conv_dims=[512] * 8,
+ loss_normalizer="visible",
+ ),
+)
+
+# Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
+# 1000 proposals per-image is found to hurt box AP.
+# Therefore we increase it to 1500 per-image.
+model.proposal_generator.post_nms_topk = (1500, 1000)
+
+# Keypoint AP degrades (though box AP improves) when using plain L1 loss
+model.roi_heads.box_predictor.smooth_l1_beta = 0.5
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_c4.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_c4.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3dcf8be42a39c6e5f6e76e3ab23adeccb33085d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_c4.py
@@ -0,0 +1,88 @@
+from detectron2.config import LazyCall as L
+from detectron2.layers import ShapeSpec
+from detectron2.modeling.meta_arch import GeneralizedRCNN
+from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
+from detectron2.modeling.backbone import BasicStem, BottleneckBlock, ResNet
+from detectron2.modeling.box_regression import Box2BoxTransform
+from detectron2.modeling.matcher import Matcher
+from detectron2.modeling.poolers import ROIPooler
+from detectron2.modeling.proposal_generator import RPN, StandardRPNHead
+from detectron2.modeling.roi_heads import (
+ FastRCNNOutputLayers,
+ MaskRCNNConvUpsampleHead,
+ Res5ROIHeads,
+)
+
+model = L(GeneralizedRCNN)(
+ backbone=L(ResNet)(
+ stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
+ stages=L(ResNet.make_default_stages)(
+ depth=50,
+ stride_in_1x1=True,
+ norm="FrozenBN",
+ ),
+ out_features=["res4"],
+ ),
+ proposal_generator=L(RPN)(
+ in_features=["res4"],
+ head=L(StandardRPNHead)(in_channels=1024, num_anchors=15),
+ anchor_generator=L(DefaultAnchorGenerator)(
+ sizes=[[32, 64, 128, 256, 512]],
+ aspect_ratios=[0.5, 1.0, 2.0],
+ strides=[16],
+ offset=0.0,
+ ),
+ anchor_matcher=L(Matcher)(
+ thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True
+ ),
+ box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
+ batch_size_per_image=256,
+ positive_fraction=0.5,
+ pre_nms_topk=(12000, 6000),
+ post_nms_topk=(2000, 1000),
+ nms_thresh=0.7,
+ ),
+ roi_heads=L(Res5ROIHeads)(
+ num_classes=80,
+ batch_size_per_image=512,
+ positive_fraction=0.25,
+ proposal_matcher=L(Matcher)(
+ thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False
+ ),
+ in_features=["res4"],
+ pooler=L(ROIPooler)(
+ output_size=14,
+ scales=(1.0 / 16,),
+ sampling_ratio=0,
+ pooler_type="ROIAlignV2",
+ ),
+ res5=L(ResNet.make_stage)(
+ block_class=BottleneckBlock,
+ num_blocks=3,
+ stride_per_block=[2, 1, 1],
+ in_channels=1024,
+ bottleneck_channels=512,
+ out_channels=2048,
+ norm="FrozenBN",
+ stride_in_1x1=True,
+ ),
+ box_predictor=L(FastRCNNOutputLayers)(
+ input_shape=L(ShapeSpec)(channels="${...res5.out_channels}", height=1, width=1),
+ test_score_thresh=0.05,
+ box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)),
+ num_classes="${..num_classes}",
+ ),
+ mask_head=L(MaskRCNNConvUpsampleHead)(
+ input_shape=L(ShapeSpec)(
+ channels="${...res5.out_channels}",
+ width="${...pooler.output_size}",
+ height="${...pooler.output_size}",
+ ),
+ num_classes="${..num_classes}",
+ conv_dims=[256],
+ ),
+ ),
+ pixel_mean=[103.530, 116.280, 123.675],
+ pixel_std=[1.0, 1.0, 1.0],
+ input_format="BGR",
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_fpn.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..744d5306f5b0ba4cf508731bd790bad823b520fa
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_fpn.py
@@ -0,0 +1,93 @@
+from detectron2.config import LazyCall as L
+from detectron2.layers import ShapeSpec
+from detectron2.modeling.meta_arch import GeneralizedRCNN
+from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
+from detectron2.modeling.backbone.fpn import LastLevelMaxPool
+from detectron2.modeling.backbone import BasicStem, FPN, ResNet
+from detectron2.modeling.box_regression import Box2BoxTransform
+from detectron2.modeling.matcher import Matcher
+from detectron2.modeling.poolers import ROIPooler
+from detectron2.modeling.proposal_generator import RPN, StandardRPNHead
+from detectron2.modeling.roi_heads import (
+ StandardROIHeads,
+ FastRCNNOutputLayers,
+ MaskRCNNConvUpsampleHead,
+ FastRCNNConvFCHead,
+)
+
+model = L(GeneralizedRCNN)(
+ backbone=L(FPN)(
+ bottom_up=L(ResNet)(
+ stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
+ stages=L(ResNet.make_default_stages)(
+ depth=50,
+ stride_in_1x1=True,
+ norm="FrozenBN",
+ ),
+ out_features=["res2", "res3", "res4", "res5"],
+ ),
+ in_features="${.bottom_up.out_features}",
+ out_channels=256,
+ top_block=L(LastLevelMaxPool)(),
+ ),
+ proposal_generator=L(RPN)(
+ in_features=["p2", "p3", "p4", "p5", "p6"],
+ head=L(StandardRPNHead)(in_channels=256, num_anchors=3),
+ anchor_generator=L(DefaultAnchorGenerator)(
+ sizes=[[32], [64], [128], [256], [512]],
+ aspect_ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64],
+ offset=0.0,
+ ),
+ anchor_matcher=L(Matcher)(
+ thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True
+ ),
+ box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
+ batch_size_per_image=256,
+ positive_fraction=0.5,
+ pre_nms_topk=(2000, 1000),
+ post_nms_topk=(1000, 1000),
+ nms_thresh=0.7,
+ ),
+ roi_heads=L(StandardROIHeads)(
+ num_classes=80,
+ batch_size_per_image=512,
+ positive_fraction=0.25,
+ proposal_matcher=L(Matcher)(
+ thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False
+ ),
+ box_in_features=["p2", "p3", "p4", "p5"],
+ box_pooler=L(ROIPooler)(
+ output_size=7,
+ scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
+ sampling_ratio=0,
+ pooler_type="ROIAlignV2",
+ ),
+ box_head=L(FastRCNNConvFCHead)(
+ input_shape=ShapeSpec(channels=256, height=7, width=7),
+ conv_dims=[],
+ fc_dims=[1024, 1024],
+ ),
+ box_predictor=L(FastRCNNOutputLayers)(
+ input_shape=ShapeSpec(channels=1024),
+ test_score_thresh=0.05,
+ box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)),
+ num_classes="${..num_classes}",
+ ),
+ mask_in_features=["p2", "p3", "p4", "p5"],
+ mask_pooler=L(ROIPooler)(
+ output_size=14,
+ scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
+ sampling_ratio=0,
+ pooler_type="ROIAlignV2",
+ ),
+ mask_head=L(MaskRCNNConvUpsampleHead)(
+ input_shape=ShapeSpec(channels=256, width=14, height=14),
+ num_classes="${..num_classes}",
+ conv_dims=[256, 256, 256, 256, 256],
+ ),
+ ),
+ pixel_mean=[103.530, 116.280, 123.675],
+ pixel_std=[1.0, 1.0, 1.0],
+ input_format="BGR",
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/models/panoptic_fpn.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/panoptic_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..88f55d2ce9db62e61445d6a3700067d9d864ecae
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/panoptic_fpn.py
@@ -0,0 +1,20 @@
+from detectron2.config import LazyCall as L
+from detectron2.layers import ShapeSpec
+from detectron2.modeling import PanopticFPN
+from detectron2.modeling.meta_arch.semantic_seg import SemSegFPNHead
+
+from .mask_rcnn_fpn import model
+
+model._target_ = PanopticFPN
+model.sem_seg_head = L(SemSegFPNHead)(
+ input_shape={
+ f: L(ShapeSpec)(stride=s, channels="${....backbone.out_channels}")
+ for f, s in zip(["p2", "p3", "p4", "p5"], [4, 8, 16, 32])
+ },
+ ignore_value=255,
+ num_classes=54, # COCO stuff + 1
+ conv_dims=128,
+ common_stride=4,
+ loss_weight=0.5,
+ norm="GN",
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/models/retinanet.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/retinanet.py
new file mode 100644
index 0000000000000000000000000000000000000000..83cfda4b6001750c676c22feb5e3560cba394140
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/models/retinanet.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+from detectron2.config import LazyCall as L
+from detectron2.layers import ShapeSpec
+from detectron2.modeling.meta_arch import RetinaNet
+from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
+from detectron2.modeling.backbone.fpn import LastLevelP6P7
+from detectron2.modeling.backbone import BasicStem, FPN, ResNet
+from detectron2.modeling.box_regression import Box2BoxTransform
+from detectron2.modeling.matcher import Matcher
+from detectron2.modeling.meta_arch.retinanet import RetinaNetHead
+
+model = L(RetinaNet)(
+ backbone=L(FPN)(
+ bottom_up=L(ResNet)(
+ stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
+ stages=L(ResNet.make_default_stages)(
+ depth=50,
+ stride_in_1x1=True,
+ norm="FrozenBN",
+ ),
+ out_features=["res3", "res4", "res5"],
+ ),
+ in_features=["res3", "res4", "res5"],
+ out_channels=256,
+ top_block=L(LastLevelP6P7)(in_channels=2048, out_channels="${..out_channels}"),
+ ),
+ head=L(RetinaNetHead)(
+ # Shape for each input feature map
+ input_shape=[ShapeSpec(channels=256)] * 5,
+ num_classes="${..num_classes}",
+ conv_dims=[256, 256, 256, 256],
+ prior_prob=0.01,
+ num_anchors=9,
+ ),
+ anchor_generator=L(DefaultAnchorGenerator)(
+ sizes=[[x, x * 2 ** (1.0 / 3), x * 2 ** (2.0 / 3)] for x in [32, 64, 128, 256, 512]],
+ aspect_ratios=[0.5, 1.0, 2.0],
+ strides=[8, 16, 32, 64, 128],
+ offset=0.0,
+ ),
+ box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
+ anchor_matcher=L(Matcher)(
+ thresholds=[0.4, 0.5], labels=[0, -1, 1], allow_low_quality_matches=True
+ ),
+ num_classes=80,
+ head_in_features=["p3", "p4", "p5", "p6", "p7"],
+ focal_loss_alpha=0.25,
+ focal_loss_gamma=2.0,
+ pixel_mean=[103.530, 116.280, 123.675],
+ pixel_std=[1.0, 1.0, 1.0],
+ input_format="BGR",
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/optim.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/optim.py
new file mode 100644
index 0000000000000000000000000000000000000000..d39d3aaa546c17e831d21d1758b69e8c1609415e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/optim.py
@@ -0,0 +1,15 @@
+import torch
+
+from detectron2.config import LazyCall as L
+from detectron2.solver.build import get_default_optimizer_params
+
+SGD = L(torch.optim.SGD)(
+ params=L(get_default_optimizer_params)(
+ # params.model is meant to be set to the model object, before instantiating
+ # the optimizer.
+ weight_decay_norm=0.0
+ ),
+ lr=0.02,
+ momentum=0.9,
+ weight_decay=1e-4,
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/common/train.py b/model/vision/grit_src/third_party/CenterNet2/configs/common/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6ed02bd59f540ca58df20bf72d462f195210a32
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/common/train.py
@@ -0,0 +1,18 @@
+# Common training-related configs that are designed for "tools/lazyconfig_train_net.py"
+# You can use your own instead, together with your own train_net.py
+train = dict(
+ output_dir="./output",
+ init_checkpoint="",
+ max_iter=90000,
+ amp=dict(enabled=False), # options for Automatic Mixed Precision
+ ddp=dict( # options for DistributedDataParallel
+ broadcast_buffers=False,
+ find_unused_parameters=False,
+ fp16_compression=False,
+ ),
+ checkpointer=dict(period=5000, max_to_keep=100), # options for PeriodicCheckpointer
+ eval_period=5000,
+ log_period=20,
+ device="cuda"
+ # ...
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..3740e9bb08c5f168a9ab3a6d94561678bad1775c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ.py
@@ -0,0 +1,9 @@
+from .mask_rcnn_R_50_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+model.backbone.bottom_up.stages.depth = 101
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..18e5f0720c568db4ef0c97b59688b5e7866df606
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_R_101_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter *= 2 # 100ep -> 200ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone * 2 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..63c54ee9a5ce2368494b775cc90fada1439feaa5
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_R_101_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter *= 4 # 100ep -> 400ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone * 4 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..df7a2aedf480ed8dc4aa3645e37420e9b893fae4
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py
@@ -0,0 +1,72 @@
+import detectron2.data.transforms as T
+from detectron2.config.lazy import LazyCall as L
+from detectron2.layers.batch_norm import NaiveSyncBatchNorm
+from detectron2.solver import WarmupParamScheduler
+from fvcore.common.param_scheduler import MultiStepParamScheduler
+
+from ..common.data.coco import dataloader
+from ..common.models.mask_rcnn_fpn import model
+from ..common.optim import SGD as optimizer
+from ..common.train import train
+
+# train from scratch
+train.init_checkpoint = ""
+train.amp.enabled = True
+train.ddp.fp16_compression = True
+model.backbone.bottom_up.freeze_at = 0
+
+# SyncBN
+# fmt: off
+model.backbone.bottom_up.stem.norm = \
+ model.backbone.bottom_up.stages.norm = \
+ model.backbone.norm = "SyncBN"
+
+# Using NaiveSyncBatchNorm becase heads may have empty input. That is not supported by
+# torch.nn.SyncBatchNorm. We can remove this after
+# https://github.com/pytorch/pytorch/issues/36530 is fixed.
+model.roi_heads.box_head.conv_norm = \
+ model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c,
+ stats_mode="N")
+# fmt: on
+
+# 2conv in RPN:
+# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950
+model.proposal_generator.head.conv_dims = [-1, -1]
+
+# 4conv1fc box head
+model.roi_heads.box_head.conv_dims = [256, 256, 256, 256]
+model.roi_heads.box_head.fc_dims = [1024]
+
+# resize_and_crop_image in:
+# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950
+image_size = 1024
+dataloader.train.mapper.augmentations = [
+ L(T.ResizeScale)(
+ min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size
+ ),
+ L(T.FixedSizeCrop)(crop_size=(image_size, image_size)),
+ L(T.RandomFlip)(horizontal=True),
+]
+
+# recompute boxes due to cropping
+dataloader.train.mapper.recompute_boxes = True
+
+# larger batch-size.
+dataloader.train.total_batch_size = 64
+
+# Equivalent to 100 epochs.
+# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep
+train.max_iter = 184375
+
+lr_multiplier = L(WarmupParamScheduler)(
+ scheduler=L(MultiStepParamScheduler)(
+ values=[1.0, 0.1, 0.01],
+ milestones=[163889, 177546],
+ num_updates=train.max_iter,
+ ),
+ warmup_length=500 / train.max_iter,
+ warmup_factor=0.067,
+)
+
+optimizer.lr = 0.1
+optimizer.weight_decay = 4e-5
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a7c376da5f9269197c44079f3e0f3b09cdc63fa
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_R_50_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter *= 2 # 100ep -> 200ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone * 2 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..97586b8f5330a9d995a0bffd1f5e7bd5b5656462
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_R_50_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter *= 4 # 100ep -> 400ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone * 4 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ca1ede262cf5c37a3a54778458c74aff1479411
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_R_50_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter //= 2 # 100ep -> 50ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone // 2 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef0b6d16d4403fb5d16a3aeb71a22621a0be5e21
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py
@@ -0,0 +1,29 @@
+from .mask_rcnn_R_50_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+from detectron2.config import LazyCall as L
+from detectron2.modeling.backbone import RegNet
+from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
+
+# Config source:
+# https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py # noqa
+model.backbone.bottom_up = L(RegNet)(
+ stem_class=SimpleStem,
+ stem_width=32,
+ block_class=ResBottleneckBlock,
+ depth=23,
+ w_a=38.65,
+ w_0=96,
+ w_m=2.43,
+ group_width=40,
+ norm="SyncBN",
+ out_features=["s1", "s2", "s3", "s4"],
+)
+model.pixel_std = [57.375, 57.120, 58.395]
+
+# RegNets benefit from enabling cudnn benchmark mode
+train.cudnn_benchmark = True
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..731320e74ebed4d8ceec58c07cb906542b8b021b
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter *= 2 # 100ep -> 200ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone * 2 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f369a2afedb6c6e69fd52ff9a9a6b1cdf965937
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter *= 4 # 100ep -> 400ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone * 4 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba2c3274a493d5136507364558c8289eb6ee6259
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py
@@ -0,0 +1,30 @@
+from .mask_rcnn_R_50_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+from detectron2.config import LazyCall as L
+from detectron2.modeling.backbone import RegNet
+from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
+
+# Config source:
+# https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py # noqa
+model.backbone.bottom_up = L(RegNet)(
+ stem_class=SimpleStem,
+ stem_width=32,
+ block_class=ResBottleneckBlock,
+ depth=22,
+ w_a=31.41,
+ w_0=96,
+ w_m=2.24,
+ group_width=64,
+ se_ratio=0.25,
+ norm="SyncBN",
+ out_features=["s1", "s2", "s3", "s4"],
+)
+model.pixel_std = [57.375, 57.120, 58.395]
+
+# RegNets benefit from enabling cudnn benchmark mode
+train.cudnn_benchmark = True
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..b867cc865e5ac4d7b70221da141894efd7cbd75c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter *= 2 # 100ep -> 200ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone * 2 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b86ea8c6c5c48f5d26c9e0df7cf96e745b17b34
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py
@@ -0,0 +1,14 @@
+from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
+ dataloader,
+ lr_multiplier,
+ model,
+ optimizer,
+ train,
+)
+
+train.max_iter *= 4 # 100ep -> 400ep
+
+lr_multiplier.scheduler.milestones = [
+ milestone * 4 for milestone in lr_multiplier.scheduler.milestones
+]
+lr_multiplier.scheduler.num_updates = train.max_iter
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/README.md b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4e6c82ef3f75a73c7006f33d7c850a0d4781a58f
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/README.md
@@ -0,0 +1,8 @@
+These are quick configs for performance or accuracy regression tracking purposes.
+
+* `*instance_test.yaml`: can train on 2 GPUs. They are used to test whether the training can
+ successfully finish. They are not expected to produce reasonable training results.
+* `*inference_acc_test.yaml`: They should be run using `--eval-only`. They run inference using pre-trained models and verify
+ the results are as expected.
+* `*training_acc_test.yaml`: They should be trained on 8 GPUs. They finish in about an hour and verify the training accuracy
+ is within the normal range.
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc5a4116cb096278823049c1f823e99f8e16e97e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml
@@ -0,0 +1,7 @@
+_BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://Misc/cascade_mask_rcnn_R_50_FPN_3x/144998488/model_final_480dd8.pkl"
+DATASETS:
+ TEST: ("coco_2017_val_100",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 50.18, 0.02], ["segm", "AP", 43.87, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e41a0fe7ffe9c3531741df49e546aa45cfe4fdee
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml
@@ -0,0 +1,11 @@
+_BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml"
+DATASETS:
+ TRAIN: ("coco_2017_val_100",)
+ TEST: ("coco_2017_val_100",)
+SOLVER:
+ BASE_LR: 0.005
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2f37e5e2cc2a9e195e13703e9930e67e0f9a896
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml
@@ -0,0 +1,7 @@
+_BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://COCO-Detection/fast_rcnn_R_50_FPN_1x/137635226/model_final_e5f7ce.pkl"
+DATASETS:
+ TEST: ("coco_2017_val_100",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 45.70, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..52fc0ec03c8b87ab2be1dda97bec1e8c93e6bb5c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml
@@ -0,0 +1,15 @@
+_BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+DATASETS:
+ TRAIN: ("coco_2017_val_100",)
+ PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", )
+ TEST: ("coco_2017_val_100",)
+ PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", )
+SOLVER:
+ BASE_LR: 0.005
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..14cf2aa82aec52ad44e28ead0665dad811d55457
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml
@@ -0,0 +1,7 @@
+_BASE_: "../COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/model_final_a6e10b.pkl"
+DATASETS:
+ TEST: ("keypoints_coco_2017_val_100",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 52.47, 0.02], ["keypoints", "AP", 67.36, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3dd209f693bd0bfdd46a2c9e7e750dede3abc141
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml
@@ -0,0 +1,16 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ KEYPOINT_ON: True
+ ROI_HEADS:
+ NUM_CLASSES: 1
+DATASETS:
+ TRAIN: ("keypoints_coco_2017_val_100",)
+ TEST: ("keypoints_coco_2017_val_100",)
+SOLVER:
+ BASE_LR: 0.005
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4b92392f1c4457033ae4c87a521e339fe9e184ce
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml
@@ -0,0 +1,30 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ KEYPOINT_ON: True
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ BATCH_SIZE_PER_IMAGE: 256
+ NUM_CLASSES: 1
+ ROI_KEYPOINT_HEAD:
+ POOLER_RESOLUTION: 14
+ POOLER_SAMPLING_RATIO: 2
+ NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: False
+ LOSS_WEIGHT: 4.0
+ ROI_BOX_HEAD:
+ SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss
+ RPN:
+ SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss
+DATASETS:
+ TRAIN: ("keypoints_coco_2017_val",)
+ TEST: ("keypoints_coco_2017_val",)
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+SOLVER:
+ WARMUP_FACTOR: 0.33333333
+ WARMUP_ITERS: 100
+ STEPS: (5500, 5800)
+ MAX_ITER: 6000
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 55.35, 1.0], ["keypoints", "AP", 76.91, 1.0]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9bd962878fea64035887c48981beeb8d41bfdbd0
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml
@@ -0,0 +1,28 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ KEYPOINT_ON: True
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ BATCH_SIZE_PER_IMAGE: 256
+ NUM_CLASSES: 1
+ ROI_KEYPOINT_HEAD:
+ POOLER_RESOLUTION: 14
+ POOLER_SAMPLING_RATIO: 2
+ ROI_BOX_HEAD:
+ SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss
+ RPN:
+ SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss
+DATASETS:
+ TRAIN: ("keypoints_coco_2017_val",)
+ TEST: ("keypoints_coco_2017_val",)
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+SOLVER:
+ WARMUP_FACTOR: 0.33333333
+ WARMUP_ITERS: 100
+ STEPS: (5500, 5800)
+ MAX_ITER: 6000
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 53.5, 1.0], ["keypoints", "AP", 72.4, 1.0]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ab6e69812b94ea7e071f29d9a6937d5c70805b5b
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml
@@ -0,0 +1,18 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+DATASETS:
+ TRAIN: ("coco_2017_val_100",)
+ TEST: ("coco_2017_val_100",)
+SOLVER:
+ BASE_LR: 0.001
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+ CLIP_GRADIENTS:
+ ENABLED: True
+ CLIP_TYPE: "value"
+ CLIP_VALUE: 1.0
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b2d5b7ff87e069f8c774a230bdfd47b8c12d18a3
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml
@@ -0,0 +1,7 @@
+_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x/137849525/model_final_4ce675.pkl"
+DATASETS:
+ TEST: ("coco_2017_val_100",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 47.37, 0.02], ["segm", "AP", 40.99, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6c4f1214efa520944fd941daec082ad45c164a23
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml
@@ -0,0 +1,14 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+DATASETS:
+ TRAIN: ("coco_2017_val_100",)
+ TEST: ("coco_2017_val_100",)
+SOLVER:
+ BASE_LR: 0.001
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f68dd8f96c7896b5fc95d694a399f2ce417c1deb
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml
@@ -0,0 +1,22 @@
+_BASE_: "../Base-RCNN-C4.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ ROI_HEADS:
+ BATCH_SIZE_PER_IMAGE: 256
+ MASK_ON: True
+DATASETS:
+ TRAIN: ("coco_2017_val",)
+ TEST: ("coco_2017_val",)
+INPUT:
+ MIN_SIZE_TRAIN: (600,)
+ MAX_SIZE_TRAIN: 1000
+ MIN_SIZE_TEST: 800
+ MAX_SIZE_TEST: 1000
+SOLVER:
+ IMS_PER_BATCH: 8 # base uses 16
+ WARMUP_FACTOR: 0.33333
+ WARMUP_ITERS: 100
+ STEPS: (11000, 11600)
+ MAX_ITER: 12000
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 41.88, 0.7], ["segm", "AP", 33.79, 0.5]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e3ce6cf922ae07fba5b5e01edbac19bf58a8e9dd
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml
@@ -0,0 +1,7 @@
+_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/model_final_84107b.pkl"
+DATASETS:
+ TEST: ("coco_2017_val_100",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 47.44, 0.02], ["segm", "AP", 42.94, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5454bfd95cc37749c50aec7866f32d9a80ca2b7
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml
@@ -0,0 +1,10 @@
+_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
+DATASETS:
+ TEST: ("coco_2017_val_100",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 47.34, 0.02], ["segm", "AP", 42.67, 0.02], ["bbox_TTA", "AP", 49.11, 0.02], ["segm_TTA", "AP", 45.04, 0.02]]
+ AUG:
+ ENABLED: True
+ MIN_SIZES: (700, 800) # to save some time
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6dbfcde0bf837990634d419a6dda1e2909c3cd7f
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml
@@ -0,0 +1,14 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+DATASETS:
+ TRAIN: ("coco_2017_val_100",)
+ TEST: ("coco_2017_val_100",)
+SOLVER:
+ BASE_LR: 0.005
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_pred_boxes_training_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_pred_boxes_training_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..52f78762bda23331c97afd523cf98a5c118b113e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_pred_boxes_training_acc_test.yaml
@@ -0,0 +1,6 @@
+_BASE_: "./mask_rcnn_R_50_FPN_training_acc_test.yaml"
+MODEL:
+ ROI_BOX_HEAD:
+ TRAIN_ON_PRED_BOXES: True
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 42.6, 1.0], ["segm", "AP", 35.8, 0.8]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aadae4ce898761e1e40e5af65a9e5ea01053b936
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml
@@ -0,0 +1,21 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ ROI_HEADS:
+ BATCH_SIZE_PER_IMAGE: 256
+ MASK_ON: True
+DATASETS:
+ TRAIN: ("coco_2017_val",)
+ TEST: ("coco_2017_val",)
+INPUT:
+ MIN_SIZE_TRAIN: (600,)
+ MAX_SIZE_TRAIN: 1000
+ MIN_SIZE_TEST: 800
+ MAX_SIZE_TEST: 1000
+SOLVER:
+ WARMUP_FACTOR: 0.3333333
+ WARMUP_ITERS: 100
+ STEPS: (5500, 5800)
+ MAX_ITER: 6000
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 42.5, 1.0], ["segm", "AP", 35.8, 0.8]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..70874e3a92c9034d75cbbebb145b61084ba15e42
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml
@@ -0,0 +1,7 @@
+_BASE_: "../COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/model_final_c10459.pkl"
+DATASETS:
+ TEST: ("coco_2017_val_100_panoptic_separated",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 46.47, 0.02], ["segm", "AP", 43.39, 0.02], ["sem_seg", "mIoU", 42.55, 0.02], ["panoptic_seg", "PQ", 38.99, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7cdee7bfcf6dc75dda52602a0d9177ad0a9cc6ed
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml
@@ -0,0 +1,19 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ META_ARCHITECTURE: "PanopticFPN"
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ SEM_SEG_HEAD:
+ LOSS_WEIGHT: 0.5
+DATASETS:
+ TRAIN: ("coco_2017_val_100_panoptic_separated",)
+ TEST: ("coco_2017_val_100_panoptic_separated",)
+SOLVER:
+ BASE_LR: 0.005
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 1
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f3bbf30196cb35434340d4c343cab0c96283cd4f
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml
@@ -0,0 +1,20 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ META_ARCHITECTURE: "PanopticFPN"
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ RESNETS:
+ DEPTH: 50
+ SEM_SEG_HEAD:
+ LOSS_WEIGHT: 0.5
+DATASETS:
+ TRAIN: ("coco_2017_val_panoptic_separated",)
+ TEST: ("coco_2017_val_panoptic_separated",)
+SOLVER:
+ BASE_LR: 0.01
+ WARMUP_FACTOR: 0.001
+ WARMUP_ITERS: 500
+ STEPS: (5500,)
+ MAX_ITER: 7000
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 46.70, 1.1], ["segm", "AP", 39.0, 0.7], ["sem_seg", "mIoU", 64.73, 1.3], ["panoptic_seg", "PQ", 48.13, 0.8]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb666c1a6b3e351227046bc9c2af8799408858e8
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml
@@ -0,0 +1,7 @@
+_BASE_: "../COCO-Detection/retinanet_R_50_FPN_3x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://COCO-Detection/retinanet_R_50_FPN_3x/190397829/model_final_5bd44e.pkl"
+DATASETS:
+ TEST: ("coco_2017_val_100",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 44.45, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8d95c1f614296716374686b22055a587ccd052b9
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml
@@ -0,0 +1,13 @@
+_BASE_: "../COCO-Detection/retinanet_R_50_FPN_1x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+DATASETS:
+ TRAIN: ("coco_2017_val_100",)
+ TEST: ("coco_2017_val_100",)
+SOLVER:
+ BASE_LR: 0.005
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c7c3f908a9e80e98b2d25b6d384a60acaba9d4f8
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml
@@ -0,0 +1,7 @@
+_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/model_final_02ce48.pkl"
+DATASETS:
+ TEST: ("coco_2017_val_100",)
+TEST:
+ EXPECTED_RESULTS: [["box_proposals", "AR@1000", 58.16, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..402d432477507dc36f04c4a9777cb80fe06b2809
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml
@@ -0,0 +1,13 @@
+_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+DATASETS:
+ TRAIN: ("coco_2017_val_100",)
+ TEST: ("coco_2017_val_100",)
+SOLVER:
+ STEPS: (30,)
+ MAX_ITER: 40
+ BASE_LR: 0.005
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bca74987d5218736983617883e0fe37f79d219b7
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml
@@ -0,0 +1,10 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ META_ARCHITECTURE: "SemanticSegmentor"
+ WEIGHTS: "detectron2://semantic_R_50_FPN_1x/111802073/model_final_c18079783c55a94968edc28b7101c5f0.pkl"
+ RESNETS:
+ DEPTH: 50
+DATASETS:
+ TEST: ("coco_2017_val_100_panoptic_stuffonly",)
+TEST:
+ EXPECTED_RESULTS: [["sem_seg", "mIoU", 39.53, 0.02], ["sem_seg", "mACC", 51.50, 0.02]]
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..14ab606f219b462fe37fcc7d5fbdbe65cb5c2642
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml
@@ -0,0 +1,18 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ META_ARCHITECTURE: "SemanticSegmentor"
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+DATASETS:
+ TRAIN: ("coco_2017_val_100_panoptic_stuffonly",)
+ TEST: ("coco_2017_val_100_panoptic_stuffonly",)
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+SOLVER:
+ BASE_LR: 0.005
+ STEPS: (30,)
+ MAX_ITER: 40
+ IMS_PER_BATCH: 4
+DATALOADER:
+ NUM_WORKERS: 2
diff --git a/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1f78d775889b11e9e76743de5ddb8139198edf61
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml
@@ -0,0 +1,20 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ META_ARCHITECTURE: "SemanticSegmentor"
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+DATASETS:
+ TRAIN: ("coco_2017_val_panoptic_stuffonly",)
+ TEST: ("coco_2017_val_panoptic_stuffonly",)
+SOLVER:
+ BASE_LR: 0.01
+ WARMUP_FACTOR: 0.001
+ WARMUP_ITERS: 300
+ STEPS: (5500,)
+ MAX_ITER: 7000
+TEST:
+ EXPECTED_RESULTS: [["sem_seg", "mIoU", 76.51, 1.0], ["sem_seg", "mACC", 83.25, 1.0]]
+INPUT:
+ # no scale augmentation
+ MIN_SIZE_TRAIN: (800, )
diff --git a/model/vision/grit_src/third_party/CenterNet2/datasets/README.md b/model/vision/grit_src/third_party/CenterNet2/datasets/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0eb44cc3b23beeb1755ab8d12002d26f13434235
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/datasets/README.md
@@ -0,0 +1,140 @@
+# Use Builtin Datasets
+
+A dataset can be used by accessing [DatasetCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.DatasetCatalog)
+for its data, or [MetadataCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.MetadataCatalog) for its metadata (class names, etc).
+This document explains how to setup the builtin datasets so they can be used by the above APIs.
+[Use Custom Datasets](https://detectron2.readthedocs.io/tutorials/datasets.html) gives a deeper dive on how to use `DatasetCatalog` and `MetadataCatalog`,
+and how to add new datasets to them.
+
+Detectron2 has builtin support for a few datasets.
+The datasets are assumed to exist in a directory specified by the environment variable
+`DETECTRON2_DATASETS`.
+Under this directory, detectron2 will look for datasets in the structure described below, if needed.
+```
+$DETECTRON2_DATASETS/
+ coco/
+ lvis/
+ cityscapes/
+ VOC20{07,12}/
+```
+
+You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`.
+If left unset, the default is `./datasets` relative to your current working directory.
+
+The [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md)
+contains configs and models that use these builtin datasets.
+
+## Expected dataset structure for [COCO instance/keypoint detection](https://cocodataset.org/#download):
+
+```
+coco/
+ annotations/
+ instances_{train,val}2017.json
+ person_keypoints_{train,val}2017.json
+ {train,val}2017/
+ # image files that are mentioned in the corresponding json
+```
+
+You can use the 2014 version of the dataset as well.
+
+Some of the builtin tests (`dev/run_*_tests.sh`) uses a tiny version of the COCO dataset,
+which you can download with `./datasets/prepare_for_tests.sh`.
+
+## Expected dataset structure for PanopticFPN:
+
+Extract panoptic annotations from [COCO website](https://cocodataset.org/#download)
+into the following structure:
+```
+coco/
+ annotations/
+ panoptic_{train,val}2017.json
+ panoptic_{train,val}2017/ # png annotations
+ panoptic_stuff_{train,val}2017/ # generated by the script mentioned below
+```
+
+Install panopticapi by:
+```
+pip install git+https://github.com/cocodataset/panopticapi.git
+```
+Then, run `python datasets/prepare_panoptic_fpn.py`, to extract semantic annotations from panoptic annotations.
+
+## Expected dataset structure for [LVIS instance segmentation](https://www.lvisdataset.org/dataset):
+```
+coco/
+ {train,val,test}2017/
+lvis/
+ lvis_v0.5_{train,val}.json
+ lvis_v0.5_image_info_test.json
+ lvis_v1_{train,val}.json
+ lvis_v1_image_info_test{,_challenge}.json
+```
+
+Install lvis-api by:
+```
+pip install git+https://github.com/lvis-dataset/lvis-api.git
+```
+
+To evaluate models trained on the COCO dataset using LVIS annotations,
+run `python datasets/prepare_cocofied_lvis.py` to prepare "cocofied" LVIS annotations.
+
+## Expected dataset structure for [cityscapes](https://www.cityscapes-dataset.com/downloads/):
+```
+cityscapes/
+ gtFine/
+ train/
+ aachen/
+ color.png, instanceIds.png, labelIds.png, polygons.json,
+ labelTrainIds.png
+ ...
+ val/
+ test/
+ # below are generated Cityscapes panoptic annotation
+ cityscapes_panoptic_train.json
+ cityscapes_panoptic_train/
+ cityscapes_panoptic_val.json
+ cityscapes_panoptic_val/
+ cityscapes_panoptic_test.json
+ cityscapes_panoptic_test/
+ leftImg8bit/
+ train/
+ val/
+ test/
+```
+Install cityscapes scripts by:
+```
+pip install git+https://github.com/mcordts/cityscapesScripts.git
+```
+
+Note: to create labelTrainIds.png, first prepare the above structure, then run cityscapesescript with:
+```
+CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createTrainIdLabelImgs.py
+```
+These files are not needed for instance segmentation.
+
+Note: to generate Cityscapes panoptic dataset, run cityscapesescript with:
+```
+CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createPanopticImgs.py
+```
+These files are not needed for semantic and instance segmentation.
+
+## Expected dataset structure for [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/index.html):
+```
+VOC20{07,12}/
+ Annotations/
+ ImageSets/
+ Main/
+ trainval.txt
+ test.txt
+ # train.txt or val.txt, if you use these splits
+ JPEGImages/
+```
+
+## Expected dataset structure for [ADE20k Scene Parsing](http://sceneparsing.csail.mit.edu/):
+```
+ADEChallengeData2016/
+ annotations/
+ annotations_detectron2/
+ images/
+ objectInfo150.txt
+```
+The directory `annotations_detectron2` is generated by running `python datasets/prepare_ade20k_sem_seg.py`.
diff --git a/model/vision/grit_src/third_party/CenterNet2/datasets/lvis/lvis_v1_train_cat_info.json b/model/vision/grit_src/third_party/CenterNet2/datasets/lvis/lvis_v1_train_cat_info.json
new file mode 100644
index 0000000000000000000000000000000000000000..95fef0923366adbf4378da64113f6bc63a1265a6
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/datasets/lvis/lvis_v1_train_cat_info.json
@@ -0,0 +1 @@
+[{"name": "aerosol_can", "instance_count": 109, "def": "a dispenser that holds a substance under pressure", "synonyms": ["aerosol_can", "spray_can"], "image_count": 64, "id": 1, "frequency": "c", "synset": "aerosol.n.02"}, {"name": "air_conditioner", "instance_count": 1081, "def": "a machine that keeps air cool and dry", "synonyms": ["air_conditioner"], "image_count": 364, "id": 2, "frequency": "f", "synset": "air_conditioner.n.01"}, {"name": "airplane", "instance_count": 3720, "def": "an aircraft that has a fixed wing and is powered by propellers or jets", "synonyms": ["airplane", "aeroplane"], "image_count": 1911, "id": 3, "frequency": "f", "synset": "airplane.n.01"}, {"name": "alarm_clock", "instance_count": 158, "def": "a clock that wakes a sleeper at some preset time", "synonyms": ["alarm_clock"], "image_count": 149, "id": 4, "frequency": "f", "synset": "alarm_clock.n.01"}, {"name": "alcohol", "instance_count": 207, "def": "a liquor or brew containing alcohol as the active agent", "synonyms": ["alcohol", "alcoholic_beverage"], "image_count": 29, "id": 5, "frequency": "c", "synset": "alcohol.n.01"}, {"name": "alligator", "instance_count": 39, "def": "amphibious reptiles related to crocodiles but with shorter broader snouts", "synonyms": ["alligator", "gator"], "image_count": 26, "id": 6, "frequency": "c", "synset": "alligator.n.02"}, {"name": "almond", "instance_count": 1700, "def": "oval-shaped edible seed of the almond tree", "synonyms": ["almond"], "image_count": 59, "id": 7, "frequency": "c", "synset": "almond.n.02"}, {"name": "ambulance", "instance_count": 25, "def": "a vehicle that takes people to and from hospitals", "synonyms": ["ambulance"], "image_count": 22, "id": 8, "frequency": "c", "synset": "ambulance.n.01"}, {"name": "amplifier", "instance_count": 16, "def": "electronic equipment that increases strength of signals", "synonyms": ["amplifier"], "image_count": 12, "id": 9, "frequency": "c", "synset": "amplifier.n.01"}, {"name": "anklet", "instance_count": 39, "def": "an ornament worn around the ankle", "synonyms": ["anklet", "ankle_bracelet"], "image_count": 28, "id": 10, "frequency": "c", "synset": "anklet.n.03"}, {"name": "antenna", "instance_count": 1018, "def": "an electrical device that sends or receives radio or television signals", "synonyms": ["antenna", "aerial", "transmitting_aerial"], "image_count": 505, "id": 11, "frequency": "f", "synset": "antenna.n.01"}, {"name": "apple", "instance_count": 17451, "def": "fruit with red or yellow or green skin and sweet to tart crisp whitish flesh", "synonyms": ["apple"], "image_count": 1207, "id": 12, "frequency": "f", "synset": "apple.n.01"}, {"name": "applesauce", "instance_count": 7, "def": "puree of stewed apples usually sweetened and spiced", "synonyms": ["applesauce"], "image_count": 4, "id": 13, "frequency": "r", "synset": "applesauce.n.01"}, {"name": "apricot", "instance_count": 62, "def": "downy yellow to rosy-colored fruit resembling a small peach", "synonyms": ["apricot"], "image_count": 10, "id": 14, "frequency": "r", "synset": "apricot.n.02"}, {"name": "apron", "instance_count": 881, "def": "a garment of cloth that is tied about the waist and worn to protect clothing", "synonyms": ["apron"], "image_count": 500, "id": 15, "frequency": "f", "synset": "apron.n.01"}, {"name": "aquarium", "instance_count": 36, "def": "a tank/pool/bowl filled with water for keeping live fish and underwater animals", "synonyms": ["aquarium", "fish_tank"], "image_count": 33, "id": 16, "frequency": "c", "synset": "aquarium.n.01"}, {"name": "arctic_(type_of_shoe)", "instance_count": 8, "def": "a waterproof overshoe that protects shoes from water or snow", "synonyms": ["arctic_(type_of_shoe)", "galosh", "golosh", "rubber_(type_of_shoe)", "gumshoe"], "image_count": 3, "id": 17, "frequency": "r", "synset": "arctic.n.02"}, {"name": "armband", "instance_count": 85, "def": "a band worn around the upper arm", "synonyms": ["armband"], "image_count": 44, "id": 18, "frequency": "c", "synset": "armband.n.02"}, {"name": "armchair", "instance_count": 1112, "def": "chair with a support on each side for arms", "synonyms": ["armchair"], "image_count": 561, "id": 19, "frequency": "f", "synset": "armchair.n.01"}, {"name": "armoire", "instance_count": 11, "def": "a large wardrobe or cabinet", "synonyms": ["armoire"], "image_count": 8, "id": 20, "frequency": "r", "synset": "armoire.n.01"}, {"name": "armor", "instance_count": 23, "def": "protective covering made of metal and used in combat", "synonyms": ["armor", "armour"], "image_count": 9, "id": 21, "frequency": "r", "synset": "armor.n.01"}, {"name": "artichoke", "instance_count": 293, "def": "a thistlelike flower head with edible fleshy leaves and heart", "synonyms": ["artichoke"], "image_count": 33, "id": 22, "frequency": "c", "synset": "artichoke.n.02"}, {"name": "trash_can", "instance_count": 2722, "def": "a bin that holds rubbish until it is collected", "synonyms": ["trash_can", "garbage_can", "wastebin", "dustbin", "trash_barrel", "trash_bin"], "image_count": 1883, "id": 23, "frequency": "f", "synset": "ashcan.n.01"}, {"name": "ashtray", "instance_count": 136, "def": "a receptacle for the ash from smokers' cigars or cigarettes", "synonyms": ["ashtray"], "image_count": 98, "id": 24, "frequency": "c", "synset": "ashtray.n.01"}, {"name": "asparagus", "instance_count": 969, "def": "edible young shoots of the asparagus plant", "synonyms": ["asparagus"], "image_count": 70, "id": 25, "frequency": "c", "synset": "asparagus.n.02"}, {"name": "atomizer", "instance_count": 67, "def": "a dispenser that turns a liquid (such as perfume) into a fine mist", "synonyms": ["atomizer", "atomiser", "spray", "sprayer", "nebulizer", "nebuliser"], "image_count": 46, "id": 26, "frequency": "c", "synset": "atomizer.n.01"}, {"name": "avocado", "instance_count": 1048, "def": "a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed", "synonyms": ["avocado"], "image_count": 117, "id": 27, "frequency": "f", "synset": "avocado.n.01"}, {"name": "award", "instance_count": 163, "def": "a tangible symbol signifying approval or distinction", "synonyms": ["award", "accolade"], "image_count": 41, "id": 28, "frequency": "c", "synset": "award.n.02"}, {"name": "awning", "instance_count": 4270, "def": "a canopy made of canvas to shelter people or things from rain or sun", "synonyms": ["awning"], "image_count": 1395, "id": 29, "frequency": "f", "synset": "awning.n.01"}, {"name": "ax", "instance_count": 8, "def": "an edge tool with a heavy bladed head mounted across a handle", "synonyms": ["ax", "axe"], "image_count": 7, "id": 30, "frequency": "r", "synset": "ax.n.01"}, {"name": "baboon", "instance_count": 3, "def": "large terrestrial monkeys having doglike muzzles", "synonyms": ["baboon"], "image_count": 1, "id": 31, "frequency": "r", "synset": "baboon.n.01"}, {"name": "baby_buggy", "instance_count": 447, "def": "a small vehicle with four wheels in which a baby or child is pushed around", "synonyms": ["baby_buggy", "baby_carriage", "perambulator", "pram", "stroller"], "image_count": 314, "id": 32, "frequency": "f", "synset": "baby_buggy.n.01"}, {"name": "basketball_backboard", "instance_count": 42, "def": "a raised vertical board with basket attached; used to play basketball", "synonyms": ["basketball_backboard"], "image_count": 31, "id": 33, "frequency": "c", "synset": "backboard.n.01"}, {"name": "backpack", "instance_count": 3907, "def": "a bag carried by a strap on your back or shoulder", "synonyms": ["backpack", "knapsack", "packsack", "rucksack", "haversack"], "image_count": 1905, "id": 34, "frequency": "f", "synset": "backpack.n.01"}, {"name": "handbag", "instance_count": 3947, "def": "a container used for carrying money and small personal items or accessories", "synonyms": ["handbag", "purse", "pocketbook"], "image_count": 1859, "id": 35, "frequency": "f", "synset": "bag.n.04"}, {"name": "suitcase", "instance_count": 8537, "def": "cases used to carry belongings when traveling", "synonyms": ["suitcase", "baggage", "luggage"], "image_count": 1623, "id": 36, "frequency": "f", "synset": "bag.n.06"}, {"name": "bagel", "instance_count": 372, "def": "glazed yeast-raised doughnut-shaped roll with hard crust", "synonyms": ["bagel", "beigel"], "image_count": 47, "id": 37, "frequency": "c", "synset": "bagel.n.01"}, {"name": "bagpipe", "instance_count": 6, "def": "a tubular wind instrument; the player blows air into a bag and squeezes it out", "synonyms": ["bagpipe"], "image_count": 3, "id": 38, "frequency": "r", "synset": "bagpipe.n.01"}, {"name": "baguet", "instance_count": 9, "def": "narrow French stick loaf", "synonyms": ["baguet", "baguette"], "image_count": 3, "id": 39, "frequency": "r", "synset": "baguet.n.01"}, {"name": "bait", "instance_count": 1, "def": "something used to lure fish or other animals into danger so they can be trapped or killed", "synonyms": ["bait", "lure"], "image_count": 1, "id": 40, "frequency": "r", "synset": "bait.n.02"}, {"name": "ball", "instance_count": 755, "def": "a spherical object used as a plaything", "synonyms": ["ball"], "image_count": 305, "id": 41, "frequency": "f", "synset": "ball.n.06"}, {"name": "ballet_skirt", "instance_count": 12, "def": "very short skirt worn by ballerinas", "synonyms": ["ballet_skirt", "tutu"], "image_count": 6, "id": 42, "frequency": "r", "synset": "ballet_skirt.n.01"}, {"name": "balloon", "instance_count": 1556, "def": "large tough nonrigid bag filled with gas or heated air", "synonyms": ["balloon"], "image_count": 210, "id": 43, "frequency": "f", "synset": "balloon.n.01"}, {"name": "bamboo", "instance_count": 243, "def": "woody tropical grass having hollow woody stems", "synonyms": ["bamboo"], "image_count": 36, "id": 44, "frequency": "c", "synset": "bamboo.n.02"}, {"name": "banana", "instance_count": 50552, "def": "elongated crescent-shaped yellow fruit with soft sweet flesh", "synonyms": ["banana"], "image_count": 1787, "id": 45, "frequency": "f", "synset": "banana.n.02"}, {"name": "Band_Aid", "instance_count": 19, "def": "trade name for an adhesive bandage to cover small cuts or blisters", "synonyms": ["Band_Aid"], "image_count": 17, "id": 46, "frequency": "c", "synset": "band_aid.n.01"}, {"name": "bandage", "instance_count": 92, "def": "a piece of soft material that covers and protects an injured part of the body", "synonyms": ["bandage"], "image_count": 51, "id": 47, "frequency": "c", "synset": "bandage.n.01"}, {"name": "bandanna", "instance_count": 219, "def": "large and brightly colored handkerchief; often used as a neckerchief", "synonyms": ["bandanna", "bandana"], "image_count": 138, "id": 48, "frequency": "f", "synset": "bandanna.n.01"}, {"name": "banjo", "instance_count": 3, "def": "a stringed instrument of the guitar family with a long neck and circular body", "synonyms": ["banjo"], "image_count": 3, "id": 49, "frequency": "r", "synset": "banjo.n.01"}, {"name": "banner", "instance_count": 5907, "def": "long strip of cloth or paper used for decoration or advertising", "synonyms": ["banner", "streamer"], "image_count": 1470, "id": 50, "frequency": "f", "synset": "banner.n.01"}, {"name": "barbell", "instance_count": 4, "def": "a bar to which heavy discs are attached at each end; used in weightlifting", "synonyms": ["barbell"], "image_count": 3, "id": 51, "frequency": "r", "synset": "barbell.n.01"}, {"name": "barge", "instance_count": 3, "def": "a flatbottom boat for carrying heavy loads (especially on canals)", "synonyms": ["barge"], "image_count": 2, "id": 52, "frequency": "r", "synset": "barge.n.01"}, {"name": "barrel", "instance_count": 707, "def": "a cylindrical container that holds liquids", "synonyms": ["barrel", "cask"], "image_count": 186, "id": 53, "frequency": "f", "synset": "barrel.n.02"}, {"name": "barrette", "instance_count": 119, "def": "a pin for holding women's hair in place", "synonyms": ["barrette"], "image_count": 76, "id": 54, "frequency": "c", "synset": "barrette.n.01"}, {"name": "barrow", "instance_count": 30, "def": "a cart for carrying small loads; has handles and one or more wheels", "synonyms": ["barrow", "garden_cart", "lawn_cart", "wheelbarrow"], "image_count": 26, "id": 55, "frequency": "c", "synset": "barrow.n.03"}, {"name": "baseball_base", "instance_count": 404, "def": "a place that the runner must touch before scoring", "synonyms": ["baseball_base"], "image_count": 303, "id": 56, "frequency": "f", "synset": "base.n.03"}, {"name": "baseball", "instance_count": 1013, "def": "a ball used in playing baseball", "synonyms": ["baseball"], "image_count": 738, "id": 57, "frequency": "f", "synset": "baseball.n.02"}, {"name": "baseball_bat", "instance_count": 2698, "def": "an implement used in baseball by the batter", "synonyms": ["baseball_bat"], "image_count": 1799, "id": 58, "frequency": "f", "synset": "baseball_bat.n.01"}, {"name": "baseball_cap", "instance_count": 9028, "def": "a cap with a bill", "synonyms": ["baseball_cap", "jockey_cap", "golf_cap"], "image_count": 1934, "id": 59, "frequency": "f", "synset": "baseball_cap.n.01"}, {"name": "baseball_glove", "instance_count": 2536, "def": "the handwear used by fielders in playing baseball", "synonyms": ["baseball_glove", "baseball_mitt"], "image_count": 1609, "id": 60, "frequency": "f", "synset": "baseball_glove.n.01"}, {"name": "basket", "instance_count": 3984, "def": "a container that is usually woven and has handles", "synonyms": ["basket", "handbasket"], "image_count": 1622, "id": 61, "frequency": "f", "synset": "basket.n.01"}, {"name": "basketball", "instance_count": 56, "def": "an inflated ball used in playing basketball", "synonyms": ["basketball"], "image_count": 41, "id": 62, "frequency": "c", "synset": "basketball.n.02"}, {"name": "bass_horn", "instance_count": 6, "def": "the lowest brass wind instrument", "synonyms": ["bass_horn", "sousaphone", "tuba"], "image_count": 4, "id": 63, "frequency": "r", "synset": "bass_horn.n.01"}, {"name": "bat_(animal)", "instance_count": 47, "def": "nocturnal mouselike mammal with forelimbs modified to form membranous wings", "synonyms": ["bat_(animal)"], "image_count": 11, "id": 64, "frequency": "c", "synset": "bat.n.01"}, {"name": "bath_mat", "instance_count": 336, "def": "a heavy towel or mat to stand on while drying yourself after a bath", "synonyms": ["bath_mat"], "image_count": 270, "id": 65, "frequency": "f", "synset": "bath_mat.n.01"}, {"name": "bath_towel", "instance_count": 1210, "def": "a large towel; to dry yourself after a bath", "synonyms": ["bath_towel"], "image_count": 349, "id": 66, "frequency": "f", "synset": "bath_towel.n.01"}, {"name": "bathrobe", "instance_count": 53, "def": "a loose-fitting robe of towelling; worn after a bath or swim", "synonyms": ["bathrobe"], "image_count": 42, "id": 67, "frequency": "c", "synset": "bathrobe.n.01"}, {"name": "bathtub", "instance_count": 868, "def": "a large open container that you fill with water and use to wash the body", "synonyms": ["bathtub", "bathing_tub"], "image_count": 823, "id": 68, "frequency": "f", "synset": "bathtub.n.01"}, {"name": "batter_(food)", "instance_count": 26, "def": "a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking", "synonyms": ["batter_(food)"], "image_count": 6, "id": 69, "frequency": "r", "synset": "batter.n.02"}, {"name": "battery", "instance_count": 155, "def": "a portable device that produces electricity", "synonyms": ["battery"], "image_count": 48, "id": 70, "frequency": "c", "synset": "battery.n.02"}, {"name": "beachball", "instance_count": 3, "def": "large and light ball; for play at the seaside", "synonyms": ["beachball"], "image_count": 3, "id": 71, "frequency": "r", "synset": "beach_ball.n.01"}, {"name": "bead", "instance_count": 1371, "def": "a small ball with a hole through the middle used for ornamentation, jewellery, etc.", "synonyms": ["bead"], "image_count": 42, "id": 72, "frequency": "c", "synset": "bead.n.01"}, {"name": "bean_curd", "instance_count": 231, "def": "cheeselike food made of curdled soybean milk", "synonyms": ["bean_curd", "tofu"], "image_count": 24, "id": 73, "frequency": "c", "synset": "bean_curd.n.01"}, {"name": "beanbag", "instance_count": 20, "def": "a bag filled with dried beans or similar items; used in games or to sit on", "synonyms": ["beanbag"], "image_count": 16, "id": 74, "frequency": "c", "synset": "beanbag.n.01"}, {"name": "beanie", "instance_count": 1907, "def": "a small skullcap; formerly worn by schoolboys and college freshmen", "synonyms": ["beanie", "beany"], "image_count": 605, "id": 75, "frequency": "f", "synset": "beanie.n.01"}, {"name": "bear", "instance_count": 1069, "def": "large carnivorous or omnivorous mammals with shaggy coats and claws", "synonyms": ["bear"], "image_count": 646, "id": 76, "frequency": "f", "synset": "bear.n.01"}, {"name": "bed", "instance_count": 2137, "def": "a piece of furniture that provides a place to sleep", "synonyms": ["bed"], "image_count": 1765, "id": 77, "frequency": "f", "synset": "bed.n.01"}, {"name": "bedpan", "instance_count": 2, "def": "a shallow vessel used by a bedridden patient for defecation and urination", "synonyms": ["bedpan"], "image_count": 2, "id": 78, "frequency": "r", "synset": "bedpan.n.01"}, {"name": "bedspread", "instance_count": 188, "def": "decorative cover for a bed", "synonyms": ["bedspread", "bedcover", "bed_covering", "counterpane", "spread"], "image_count": 125, "id": 79, "frequency": "f", "synset": "bedspread.n.01"}, {"name": "cow", "instance_count": 8085, "def": "cattle/cow", "synonyms": ["cow"], "image_count": 1420, "id": 80, "frequency": "f", "synset": "beef.n.01"}, {"name": "beef_(food)", "instance_count": 1242, "def": "meat from an adult domestic bovine", "synonyms": ["beef_(food)", "boeuf_(food)"], "image_count": 140, "id": 81, "frequency": "f", "synset": "beef.n.02"}, {"name": "beeper", "instance_count": 4, "def": "an device that beeps when the person carrying it is being paged", "synonyms": ["beeper", "pager"], "image_count": 4, "id": 82, "frequency": "r", "synset": "beeper.n.01"}, {"name": "beer_bottle", "instance_count": 1227, "def": "a bottle that holds beer", "synonyms": ["beer_bottle"], "image_count": 322, "id": 83, "frequency": "f", "synset": "beer_bottle.n.01"}, {"name": "beer_can", "instance_count": 203, "def": "a can that holds beer", "synonyms": ["beer_can"], "image_count": 60, "id": 84, "frequency": "c", "synset": "beer_can.n.01"}, {"name": "beetle", "instance_count": 9, "def": "insect with hard wing covers", "synonyms": ["beetle"], "image_count": 2, "id": 85, "frequency": "r", "synset": "beetle.n.01"}, {"name": "bell", "instance_count": 590, "def": "a hollow device made of metal that makes a ringing sound when struck", "synonyms": ["bell"], "image_count": 231, "id": 86, "frequency": "f", "synset": "bell.n.01"}, {"name": "bell_pepper", "instance_count": 4369, "def": "large bell-shaped sweet pepper in green or red or yellow or orange or black varieties", "synonyms": ["bell_pepper", "capsicum"], "image_count": 333, "id": 87, "frequency": "f", "synset": "bell_pepper.n.02"}, {"name": "belt", "instance_count": 3683, "def": "a band to tie or buckle around the body (usually at the waist)", "synonyms": ["belt"], "image_count": 1941, "id": 88, "frequency": "f", "synset": "belt.n.02"}, {"name": "belt_buckle", "instance_count": 589, "def": "the buckle used to fasten a belt", "synonyms": ["belt_buckle"], "image_count": 367, "id": 89, "frequency": "f", "synset": "belt_buckle.n.01"}, {"name": "bench", "instance_count": 4374, "def": "a long seat for more than one person", "synonyms": ["bench"], "image_count": 1922, "id": 90, "frequency": "f", "synset": "bench.n.01"}, {"name": "beret", "instance_count": 57, "def": "a cap with no brim or bill; made of soft cloth", "synonyms": ["beret"], "image_count": 18, "id": 91, "frequency": "c", "synset": "beret.n.01"}, {"name": "bib", "instance_count": 96, "def": "a napkin tied under the chin of a child while eating", "synonyms": ["bib"], "image_count": 81, "id": 92, "frequency": "c", "synset": "bib.n.02"}, {"name": "Bible", "instance_count": 2, "def": "the sacred writings of the Christian religions", "synonyms": ["Bible"], "image_count": 1, "id": 93, "frequency": "r", "synset": "bible.n.01"}, {"name": "bicycle", "instance_count": 4566, "def": "a wheeled vehicle that has two wheels and is moved by foot pedals", "synonyms": ["bicycle", "bike_(bicycle)"], "image_count": 1852, "id": 94, "frequency": "f", "synset": "bicycle.n.01"}, {"name": "visor", "instance_count": 777, "def": "a brim that projects to the front to shade the eyes", "synonyms": ["visor", "vizor"], "image_count": 430, "id": 95, "frequency": "f", "synset": "bill.n.09"}, {"name": "billboard", "instance_count": 1025, "def": "large outdoor signboard", "synonyms": ["billboard"], "image_count": 247, "id": 96, "frequency": "f", "synset": "billboard.n.01"}, {"name": "binder", "instance_count": 311, "def": "holds loose papers or magazines", "synonyms": ["binder", "ring-binder"], "image_count": 94, "id": 97, "frequency": "c", "synset": "binder.n.03"}, {"name": "binoculars", "instance_count": 22, "def": "an optical instrument designed for simultaneous use by both eyes", "synonyms": ["binoculars", "field_glasses", "opera_glasses"], "image_count": 21, "id": 98, "frequency": "c", "synset": "binoculars.n.01"}, {"name": "bird", "instance_count": 11557, "def": "animal characterized by feathers and wings", "synonyms": ["bird"], "image_count": 1821, "id": 99, "frequency": "f", "synset": "bird.n.01"}, {"name": "birdfeeder", "instance_count": 16, "def": "an outdoor device that supplies food for wild birds", "synonyms": ["birdfeeder"], "image_count": 16, "id": 100, "frequency": "c", "synset": "bird_feeder.n.01"}, {"name": "birdbath", "instance_count": 12, "def": "an ornamental basin (usually in a garden) for birds to bathe in", "synonyms": ["birdbath"], "image_count": 12, "id": 101, "frequency": "c", "synset": "birdbath.n.01"}, {"name": "birdcage", "instance_count": 180, "def": "a cage in which a bird can be kept", "synonyms": ["birdcage"], "image_count": 25, "id": 102, "frequency": "c", "synset": "birdcage.n.01"}, {"name": "birdhouse", "instance_count": 60, "def": "a shelter for birds", "synonyms": ["birdhouse"], "image_count": 41, "id": 103, "frequency": "c", "synset": "birdhouse.n.01"}, {"name": "birthday_cake", "instance_count": 311, "def": "decorated cake served at a birthday party", "synonyms": ["birthday_cake"], "image_count": 244, "id": 104, "frequency": "f", "synset": "birthday_cake.n.01"}, {"name": "birthday_card", "instance_count": 23, "def": "a card expressing a birthday greeting", "synonyms": ["birthday_card"], "image_count": 7, "id": 105, "frequency": "r", "synset": "birthday_card.n.01"}, {"name": "pirate_flag", "instance_count": 1, "def": "a flag usually bearing a white skull and crossbones on a black background", "synonyms": ["pirate_flag"], "image_count": 1, "id": 106, "frequency": "r", "synset": "black_flag.n.01"}, {"name": "black_sheep", "instance_count": 214, "def": "sheep with a black coat", "synonyms": ["black_sheep"], "image_count": 40, "id": 107, "frequency": "c", "synset": "black_sheep.n.02"}, {"name": "blackberry", "instance_count": 406, "def": "large sweet black or very dark purple edible aggregate fruit", "synonyms": ["blackberry"], "image_count": 40, "id": 108, "frequency": "c", "synset": "blackberry.n.01"}, {"name": "blackboard", "instance_count": 154, "def": "sheet of slate; for writing with chalk", "synonyms": ["blackboard", "chalkboard"], "image_count": 104, "id": 109, "frequency": "f", "synset": "blackboard.n.01"}, {"name": "blanket", "instance_count": 3075, "def": "bedding that keeps a person warm in bed", "synonyms": ["blanket"], "image_count": 1671, "id": 110, "frequency": "f", "synset": "blanket.n.01"}, {"name": "blazer", "instance_count": 124, "def": "lightweight jacket; often striped in the colors of a club or school", "synonyms": ["blazer", "sport_jacket", "sport_coat", "sports_jacket", "sports_coat"], "image_count": 49, "id": 111, "frequency": "c", "synset": "blazer.n.01"}, {"name": "blender", "instance_count": 316, "def": "an electrically powered mixer that mix or chop or liquefy foods", "synonyms": ["blender", "liquidizer", "liquidiser"], "image_count": 243, "id": 112, "frequency": "f", "synset": "blender.n.01"}, {"name": "blimp", "instance_count": 3, "def": "a small nonrigid airship used for observation or as a barrage balloon", "synonyms": ["blimp"], "image_count": 2, "id": 113, "frequency": "r", "synset": "blimp.n.02"}, {"name": "blinker", "instance_count": 1269, "def": "a light that flashes on and off; used as a signal or to send messages", "synonyms": ["blinker", "flasher"], "image_count": 242, "id": 114, "frequency": "f", "synset": "blinker.n.01"}, {"name": "blouse", "instance_count": 623, "def": "a top worn by women", "synonyms": ["blouse"], "image_count": 271, "id": 115, "frequency": "f", "synset": "blouse.n.01"}, {"name": "blueberry", "instance_count": 2114, "def": "sweet edible dark-blue berries of blueberry plants", "synonyms": ["blueberry"], "image_count": 104, "id": 116, "frequency": "f", "synset": "blueberry.n.02"}, {"name": "gameboard", "instance_count": 17, "def": "a flat portable surface (usually rectangular) designed for board games", "synonyms": ["gameboard"], "image_count": 8, "id": 117, "frequency": "r", "synset": "board.n.09"}, {"name": "boat", "instance_count": 9981, "def": "a vessel for travel on water", "synonyms": ["boat", "ship_(boat)"], "image_count": 1758, "id": 118, "frequency": "f", "synset": "boat.n.01"}, {"name": "bob", "instance_count": 2, "def": "a small float usually made of cork; attached to a fishing line", "synonyms": ["bob", "bobber", "bobfloat"], "image_count": 1, "id": 119, "frequency": "r", "synset": "bob.n.05"}, {"name": "bobbin", "instance_count": 190, "def": "a thing around which thread/tape/film or other flexible materials can be wound", "synonyms": ["bobbin", "spool", "reel"], "image_count": 48, "id": 120, "frequency": "c", "synset": "bobbin.n.01"}, {"name": "bobby_pin", "instance_count": 43, "def": "a flat wire hairpin used to hold bobbed hair in place", "synonyms": ["bobby_pin", "hairgrip"], "image_count": 14, "id": 121, "frequency": "c", "synset": "bobby_pin.n.01"}, {"name": "boiled_egg", "instance_count": 125, "def": "egg cooked briefly in the shell in gently boiling water", "synonyms": ["boiled_egg", "coddled_egg"], "image_count": 40, "id": 122, "frequency": "c", "synset": "boiled_egg.n.01"}, {"name": "bolo_tie", "instance_count": 1, "def": "a cord fastened around the neck with an ornamental clasp and worn as a necktie", "synonyms": ["bolo_tie", "bolo", "bola_tie", "bola"], "image_count": 1, "id": 123, "frequency": "r", "synset": "bolo_tie.n.01"}, {"name": "deadbolt", "instance_count": 46, "def": "the part of a lock that is engaged or withdrawn with a key", "synonyms": ["deadbolt"], "image_count": 37, "id": 124, "frequency": "c", "synset": "bolt.n.03"}, {"name": "bolt", "instance_count": 11261, "def": "a screw that screws into a nut to form a fastener", "synonyms": ["bolt"], "image_count": 1510, "id": 125, "frequency": "f", "synset": "bolt.n.06"}, {"name": "bonnet", "instance_count": 10, "def": "a hat tied under the chin", "synonyms": ["bonnet"], "image_count": 6, "id": 126, "frequency": "r", "synset": "bonnet.n.01"}, {"name": "book", "instance_count": 33353, "def": "a written work or composition that has been published", "synonyms": ["book"], "image_count": 1903, "id": 127, "frequency": "f", "synset": "book.n.01"}, {"name": "bookcase", "instance_count": 113, "def": "a piece of furniture with shelves for storing books", "synonyms": ["bookcase"], "image_count": 70, "id": 128, "frequency": "c", "synset": "bookcase.n.01"}, {"name": "booklet", "instance_count": 439, "def": "a small book usually having a paper cover", "synonyms": ["booklet", "brochure", "leaflet", "pamphlet"], "image_count": 86, "id": 129, "frequency": "c", "synset": "booklet.n.01"}, {"name": "bookmark", "instance_count": 15, "def": "a marker (a piece of paper or ribbon) placed between the pages of a book", "synonyms": ["bookmark", "bookmarker"], "image_count": 7, "id": 130, "frequency": "r", "synset": "bookmark.n.01"}, {"name": "boom_microphone", "instance_count": 10, "def": "a pole carrying an overhead microphone projected over a film or tv set", "synonyms": ["boom_microphone", "microphone_boom"], "image_count": 5, "id": 131, "frequency": "r", "synset": "boom.n.04"}, {"name": "boot", "instance_count": 4194, "def": "footwear that covers the whole foot and lower leg", "synonyms": ["boot"], "image_count": 1406, "id": 132, "frequency": "f", "synset": "boot.n.01"}, {"name": "bottle", "instance_count": 7969, "def": "a glass or plastic vessel used for storing drinks or other liquids", "synonyms": ["bottle"], "image_count": 1901, "id": 133, "frequency": "f", "synset": "bottle.n.01"}, {"name": "bottle_opener", "instance_count": 15, "def": "an opener for removing caps or corks from bottles", "synonyms": ["bottle_opener"], "image_count": 15, "id": 134, "frequency": "c", "synset": "bottle_opener.n.01"}, {"name": "bouquet", "instance_count": 53, "def": "an arrangement of flowers that is usually given as a present", "synonyms": ["bouquet"], "image_count": 28, "id": 135, "frequency": "c", "synset": "bouquet.n.01"}, {"name": "bow_(weapon)", "instance_count": 6, "def": "a weapon for shooting arrows", "synonyms": ["bow_(weapon)"], "image_count": 6, "id": 136, "frequency": "r", "synset": "bow.n.04"}, {"name": "bow_(decorative_ribbons)", "instance_count": 1144, "def": "a decorative interlacing of ribbons", "synonyms": ["bow_(decorative_ribbons)"], "image_count": 494, "id": 137, "frequency": "f", "synset": "bow.n.08"}, {"name": "bow-tie", "instance_count": 359, "def": "a man's tie that ties in a bow", "synonyms": ["bow-tie", "bowtie"], "image_count": 234, "id": 138, "frequency": "f", "synset": "bow_tie.n.01"}, {"name": "bowl", "instance_count": 5308, "def": "a dish that is round and open at the top for serving foods", "synonyms": ["bowl"], "image_count": 1922, "id": 139, "frequency": "f", "synset": "bowl.n.03"}, {"name": "pipe_bowl", "instance_count": 1, "def": "a small round container that is open at the top for holding tobacco", "synonyms": ["pipe_bowl"], "image_count": 1, "id": 140, "frequency": "r", "synset": "bowl.n.08"}, {"name": "bowler_hat", "instance_count": 89, "def": "a felt hat that is round and hard with a narrow brim", "synonyms": ["bowler_hat", "bowler", "derby_hat", "derby", "plug_hat"], "image_count": 35, "id": 141, "frequency": "c", "synset": "bowler_hat.n.01"}, {"name": "bowling_ball", "instance_count": 38, "def": "a large ball with finger holes used in the sport of bowling", "synonyms": ["bowling_ball"], "image_count": 5, "id": 142, "frequency": "r", "synset": "bowling_ball.n.01"}, {"name": "box", "instance_count": 7855, "def": "a (usually rectangular) container; may have a lid", "synonyms": ["box"], "image_count": 1828, "id": 143, "frequency": "f", "synset": "box.n.01"}, {"name": "boxing_glove", "instance_count": 22, "def": "large glove coverings the fists of a fighter worn for the sport of boxing", "synonyms": ["boxing_glove"], "image_count": 8, "id": 144, "frequency": "r", "synset": "boxing_glove.n.01"}, {"name": "suspenders", "instance_count": 88, "def": "elastic straps that hold trousers up (usually used in the plural)", "synonyms": ["suspenders"], "image_count": 63, "id": 145, "frequency": "c", "synset": "brace.n.06"}, {"name": "bracelet", "instance_count": 3219, "def": "jewelry worn around the wrist for decoration", "synonyms": ["bracelet", "bangle"], "image_count": 1668, "id": 146, "frequency": "f", "synset": "bracelet.n.02"}, {"name": "brass_plaque", "instance_count": 4, "def": "a memorial made of brass", "synonyms": ["brass_plaque"], "image_count": 4, "id": 147, "frequency": "r", "synset": "brass.n.07"}, {"name": "brassiere", "instance_count": 118, "def": "an undergarment worn by women to support their breasts", "synonyms": ["brassiere", "bra", "bandeau"], "image_count": 95, "id": 148, "frequency": "c", "synset": "brassiere.n.01"}, {"name": "bread-bin", "instance_count": 17, "def": "a container used to keep bread or cake in", "synonyms": ["bread-bin", "breadbox"], "image_count": 17, "id": 149, "frequency": "c", "synset": "bread-bin.n.01"}, {"name": "bread", "instance_count": 6550, "def": "food made from dough of flour or meal and usually raised with yeast or baking powder and then baked", "synonyms": ["bread"], "image_count": 1567, "id": 150, "frequency": "f", "synset": "bread.n.01"}, {"name": "breechcloth", "instance_count": 3, "def": "a garment that provides covering for the loins", "synonyms": ["breechcloth", "breechclout", "loincloth"], "image_count": 2, "id": 151, "frequency": "r", "synset": "breechcloth.n.01"}, {"name": "bridal_gown", "instance_count": 118, "def": "a gown worn by the bride at a wedding", "synonyms": ["bridal_gown", "wedding_gown", "wedding_dress"], "image_count": 103, "id": 152, "frequency": "f", "synset": "bridal_gown.n.01"}, {"name": "briefcase", "instance_count": 84, "def": "a case with a handle; for carrying papers or files or books", "synonyms": ["briefcase"], "image_count": 50, "id": 153, "frequency": "c", "synset": "briefcase.n.01"}, {"name": "broccoli", "instance_count": 12166, "def": "plant with dense clusters of tight green flower buds", "synonyms": ["broccoli"], "image_count": 1309, "id": 154, "frequency": "f", "synset": "broccoli.n.01"}, {"name": "broach", "instance_count": 9, "def": "a decorative pin worn by women", "synonyms": ["broach"], "image_count": 6, "id": 155, "frequency": "r", "synset": "brooch.n.01"}, {"name": "broom", "instance_count": 144, "def": "bundle of straws or twigs attached to a long handle; used for cleaning", "synonyms": ["broom"], "image_count": 92, "id": 156, "frequency": "c", "synset": "broom.n.01"}, {"name": "brownie", "instance_count": 217, "def": "square or bar of very rich chocolate cake usually with nuts", "synonyms": ["brownie"], "image_count": 19, "id": 157, "frequency": "c", "synset": "brownie.n.03"}, {"name": "brussels_sprouts", "instance_count": 590, "def": "the small edible cabbage-like buds growing along a stalk", "synonyms": ["brussels_sprouts"], "image_count": 37, "id": 158, "frequency": "c", "synset": "brussels_sprouts.n.01"}, {"name": "bubble_gum", "instance_count": 4, "def": "a kind of chewing gum that can be blown into bubbles", "synonyms": ["bubble_gum"], "image_count": 4, "id": 159, "frequency": "r", "synset": "bubble_gum.n.01"}, {"name": "bucket", "instance_count": 1346, "def": "a roughly cylindrical vessel that is open at the top", "synonyms": ["bucket", "pail"], "image_count": 709, "id": 160, "frequency": "f", "synset": "bucket.n.01"}, {"name": "horse_buggy", "instance_count": 19, "def": "a small lightweight carriage; drawn by a single horse", "synonyms": ["horse_buggy"], "image_count": 9, "id": 161, "frequency": "r", "synset": "buggy.n.01"}, {"name": "bull", "instance_count": 230, "def": "a cow with horns", "synonyms": ["horned_cow"], "image_count": 82, "id": 162, "frequency": "c", "synset": "bull.n.11"}, {"name": "bulldog", "instance_count": 21, "def": "a thickset short-haired dog with a large head and strong undershot lower jaw", "synonyms": ["bulldog"], "image_count": 15, "id": 163, "frequency": "c", "synset": "bulldog.n.01"}, {"name": "bulldozer", "instance_count": 4, "def": "large powerful tractor; a large blade in front flattens areas of ground", "synonyms": ["bulldozer", "dozer"], "image_count": 3, "id": 164, "frequency": "r", "synset": "bulldozer.n.01"}, {"name": "bullet_train", "instance_count": 80, "def": "a high-speed passenger train", "synonyms": ["bullet_train"], "image_count": 61, "id": 165, "frequency": "c", "synset": "bullet_train.n.01"}, {"name": "bulletin_board", "instance_count": 76, "def": "a board that hangs on a wall; displays announcements", "synonyms": ["bulletin_board", "notice_board"], "image_count": 51, "id": 166, "frequency": "c", "synset": "bulletin_board.n.02"}, {"name": "bulletproof_vest", "instance_count": 27, "def": "a vest capable of resisting the impact of a bullet", "synonyms": ["bulletproof_vest"], "image_count": 5, "id": 167, "frequency": "r", "synset": "bulletproof_vest.n.01"}, {"name": "bullhorn", "instance_count": 15, "def": "a portable loudspeaker with built-in microphone and amplifier", "synonyms": ["bullhorn", "megaphone"], "image_count": 13, "id": 168, "frequency": "c", "synset": "bullhorn.n.01"}, {"name": "bun", "instance_count": 1780, "def": "small rounded bread either plain or sweet", "synonyms": ["bun", "roll"], "image_count": 642, "id": 169, "frequency": "f", "synset": "bun.n.01"}, {"name": "bunk_bed", "instance_count": 44, "def": "beds built one above the other", "synonyms": ["bunk_bed"], "image_count": 24, "id": 170, "frequency": "c", "synset": "bunk_bed.n.01"}, {"name": "buoy", "instance_count": 1404, "def": "a float attached by rope to the seabed to mark channels in a harbor or underwater hazards", "synonyms": ["buoy"], "image_count": 255, "id": 171, "frequency": "f", "synset": "buoy.n.01"}, {"name": "burrito", "instance_count": 14, "def": "a flour tortilla folded around a filling", "synonyms": ["burrito"], "image_count": 9, "id": 172, "frequency": "r", "synset": "burrito.n.01"}, {"name": "bus_(vehicle)", "instance_count": 3281, "def": "a vehicle carrying many passengers; used for public transport", "synonyms": ["bus_(vehicle)", "autobus", "charabanc", "double-decker", "motorbus", "motorcoach"], "image_count": 1808, "id": 173, "frequency": "f", "synset": "bus.n.01"}, {"name": "business_card", "instance_count": 84, "def": "a card on which are printed the person's name and business affiliation", "synonyms": ["business_card"], "image_count": 31, "id": 174, "frequency": "c", "synset": "business_card.n.01"}, {"name": "butter", "instance_count": 308, "def": "an edible emulsion of fat globules made by churning milk or cream; for cooking and table use", "synonyms": ["butter"], "image_count": 158, "id": 175, "frequency": "f", "synset": "butter.n.01"}, {"name": "butterfly", "instance_count": 296, "def": "insect typically having a slender body with knobbed antennae and broad colorful wings", "synonyms": ["butterfly"], "image_count": 80, "id": 176, "frequency": "c", "synset": "butterfly.n.01"}, {"name": "button", "instance_count": 7884, "def": "a round fastener sewn to shirts and coats etc to fit through buttonholes", "synonyms": ["button"], "image_count": 1884, "id": 177, "frequency": "f", "synset": "button.n.01"}, {"name": "cab_(taxi)", "instance_count": 414, "def": "a car that takes passengers where they want to go in exchange for money", "synonyms": ["cab_(taxi)", "taxi", "taxicab"], "image_count": 158, "id": 178, "frequency": "f", "synset": "cab.n.03"}, {"name": "cabana", "instance_count": 20, "def": "a small tent used as a dressing room beside the sea or a swimming pool", "synonyms": ["cabana"], "image_count": 2, "id": 179, "frequency": "r", "synset": "cabana.n.01"}, {"name": "cabin_car", "instance_count": 14, "def": "a car on a freight train for use of the train crew; usually the last car on the train", "synonyms": ["cabin_car", "caboose"], "image_count": 12, "id": 180, "frequency": "c", "synset": "cabin_car.n.01"}, {"name": "cabinet", "instance_count": 7371, "def": "a piece of furniture resembling a cupboard with doors and shelves and drawers", "synonyms": ["cabinet"], "image_count": 1659, "id": 181, "frequency": "f", "synset": "cabinet.n.01"}, {"name": "locker", "instance_count": 95, "def": "a storage compartment for clothes and valuables; usually it has a lock", "synonyms": ["locker", "storage_locker"], "image_count": 7, "id": 182, "frequency": "r", "synset": "cabinet.n.03"}, {"name": "cake", "instance_count": 2297, "def": "baked goods made from or based on a mixture of flour, sugar, eggs, and fat", "synonyms": ["cake"], "image_count": 834, "id": 183, "frequency": "f", "synset": "cake.n.03"}, {"name": "calculator", "instance_count": 60, "def": "a small machine that is used for mathematical calculations", "synonyms": ["calculator"], "image_count": 57, "id": 184, "frequency": "c", "synset": "calculator.n.02"}, {"name": "calendar", "instance_count": 251, "def": "a list or register of events (appointments/social events/court cases, etc)", "synonyms": ["calendar"], "image_count": 174, "id": 185, "frequency": "f", "synset": "calendar.n.02"}, {"name": "calf", "instance_count": 301, "def": "young of domestic cattle", "synonyms": ["calf"], "image_count": 95, "id": 186, "frequency": "c", "synset": "calf.n.01"}, {"name": "camcorder", "instance_count": 45, "def": "a portable television camera and videocassette recorder", "synonyms": ["camcorder"], "image_count": 27, "id": 187, "frequency": "c", "synset": "camcorder.n.01"}, {"name": "camel", "instance_count": 34, "def": "cud-chewing mammal used as a draft or saddle animal in desert regions", "synonyms": ["camel"], "image_count": 22, "id": 188, "frequency": "c", "synset": "camel.n.01"}, {"name": "camera", "instance_count": 2471, "def": "equipment for taking photographs", "synonyms": ["camera"], "image_count": 1391, "id": 189, "frequency": "f", "synset": "camera.n.01"}, {"name": "camera_lens", "instance_count": 167, "def": "a lens that focuses the image in a camera", "synonyms": ["camera_lens"], "image_count": 90, "id": 190, "frequency": "c", "synset": "camera_lens.n.01"}, {"name": "camper_(vehicle)", "instance_count": 102, "def": "a recreational vehicle equipped for camping out while traveling", "synonyms": ["camper_(vehicle)", "camping_bus", "motor_home"], "image_count": 40, "id": 191, "frequency": "c", "synset": "camper.n.02"}, {"name": "can", "instance_count": 1424, "def": "airtight sealed metal container for food or drink or paint etc.", "synonyms": ["can", "tin_can"], "image_count": 445, "id": 192, "frequency": "f", "synset": "can.n.01"}, {"name": "can_opener", "instance_count": 22, "def": "a device for cutting cans open", "synonyms": ["can_opener", "tin_opener"], "image_count": 21, "id": 193, "frequency": "c", "synset": "can_opener.n.01"}, {"name": "candle", "instance_count": 4288, "def": "stick of wax with a wick in the middle", "synonyms": ["candle", "candlestick"], "image_count": 1132, "id": 194, "frequency": "f", "synset": "candle.n.01"}, {"name": "candle_holder", "instance_count": 530, "def": "a holder with sockets for candles", "synonyms": ["candle_holder"], "image_count": 177, "id": 195, "frequency": "f", "synset": "candlestick.n.01"}, {"name": "candy_bar", "instance_count": 29, "def": "a candy shaped as a bar", "synonyms": ["candy_bar"], "image_count": 4, "id": 196, "frequency": "r", "synset": "candy_bar.n.01"}, {"name": "candy_cane", "instance_count": 107, "def": "a hard candy in the shape of a rod (usually with stripes)", "synonyms": ["candy_cane"], "image_count": 17, "id": 197, "frequency": "c", "synset": "candy_cane.n.01"}, {"name": "walking_cane", "instance_count": 106, "def": "a stick that people can lean on to help them walk", "synonyms": ["walking_cane"], "image_count": 84, "id": 198, "frequency": "c", "synset": "cane.n.01"}, {"name": "canister", "instance_count": 218, "def": "metal container for storing dry foods such as tea or flour", "synonyms": ["canister", "cannister"], "image_count": 55, "id": 199, "frequency": "c", "synset": "canister.n.02"}, {"name": "canoe", "instance_count": 96, "def": "small and light boat; pointed at both ends; propelled with a paddle", "synonyms": ["canoe"], "image_count": 30, "id": 200, "frequency": "c", "synset": "canoe.n.01"}, {"name": "cantaloup", "instance_count": 193, "def": "the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh", "synonyms": ["cantaloup", "cantaloupe"], "image_count": 25, "id": 201, "frequency": "c", "synset": "cantaloup.n.02"}, {"name": "canteen", "instance_count": 2, "def": "a flask for carrying water; used by soldiers or travelers", "synonyms": ["canteen"], "image_count": 2, "id": 202, "frequency": "r", "synset": "canteen.n.01"}, {"name": "cap_(headwear)", "instance_count": 636, "def": "a tight-fitting headwear", "synonyms": ["cap_(headwear)"], "image_count": 125, "id": 203, "frequency": "f", "synset": "cap.n.01"}, {"name": "bottle_cap", "instance_count": 5293, "def": "a top (as for a bottle)", "synonyms": ["bottle_cap", "cap_(container_lid)"], "image_count": 1135, "id": 204, "frequency": "f", "synset": "cap.n.02"}, {"name": "cape", "instance_count": 27, "def": "a sleeveless garment like a cloak but shorter", "synonyms": ["cape"], "image_count": 19, "id": 205, "frequency": "c", "synset": "cape.n.02"}, {"name": "cappuccino", "instance_count": 87, "def": "equal parts of espresso and steamed milk", "synonyms": ["cappuccino", "coffee_cappuccino"], "image_count": 72, "id": 206, "frequency": "c", "synset": "cappuccino.n.01"}, {"name": "car_(automobile)", "instance_count": 10528, "def": "a motor vehicle with four wheels", "synonyms": ["car_(automobile)", "auto_(automobile)", "automobile"], "image_count": 1926, "id": 207, "frequency": "f", "synset": "car.n.01"}, {"name": "railcar_(part_of_a_train)", "instance_count": 928, "def": "a wheeled vehicle adapted to the rails of railroad (mark each individual railcar separately)", "synonyms": ["railcar_(part_of_a_train)", "railway_car_(part_of_a_train)", "railroad_car_(part_of_a_train)"], "image_count": 159, "id": 208, "frequency": "f", "synset": "car.n.02"}, {"name": "elevator_car", "instance_count": 10, "def": "where passengers ride up and down", "synonyms": ["elevator_car"], "image_count": 7, "id": 209, "frequency": "r", "synset": "car.n.04"}, {"name": "car_battery", "instance_count": 1, "def": "a battery in a motor vehicle", "synonyms": ["car_battery", "automobile_battery"], "image_count": 1, "id": 210, "frequency": "r", "synset": "car_battery.n.01"}, {"name": "identity_card", "instance_count": 16, "def": "a card certifying the identity of the bearer", "synonyms": ["identity_card"], "image_count": 13, "id": 211, "frequency": "c", "synset": "card.n.02"}, {"name": "card", "instance_count": 122, "def": "a rectangular piece of paper used to send messages (e.g. greetings or pictures)", "synonyms": ["card"], "image_count": 35, "id": 212, "frequency": "c", "synset": "card.n.03"}, {"name": "cardigan", "instance_count": 22, "def": "knitted jacket that is fastened up the front with buttons or a zipper", "synonyms": ["cardigan"], "image_count": 18, "id": 213, "frequency": "c", "synset": "cardigan.n.01"}, {"name": "cargo_ship", "instance_count": 15, "def": "a ship designed to carry cargo", "synonyms": ["cargo_ship", "cargo_vessel"], "image_count": 8, "id": 214, "frequency": "r", "synset": "cargo_ship.n.01"}, {"name": "carnation", "instance_count": 22, "def": "plant with pink to purple-red spice-scented usually double flowers", "synonyms": ["carnation"], "image_count": 6, "id": 215, "frequency": "r", "synset": "carnation.n.01"}, {"name": "horse_carriage", "instance_count": 49, "def": "a vehicle with wheels drawn by one or more horses", "synonyms": ["horse_carriage"], "image_count": 35, "id": 216, "frequency": "c", "synset": "carriage.n.02"}, {"name": "carrot", "instance_count": 18049, "def": "deep orange edible root of the cultivated carrot plant", "synonyms": ["carrot"], "image_count": 1222, "id": 217, "frequency": "f", "synset": "carrot.n.01"}, {"name": "tote_bag", "instance_count": 231, "def": "a capacious bag or basket", "synonyms": ["tote_bag"], "image_count": 103, "id": 218, "frequency": "f", "synset": "carryall.n.01"}, {"name": "cart", "instance_count": 51, "def": "a heavy open wagon usually having two wheels and drawn by an animal", "synonyms": ["cart"], "image_count": 28, "id": 219, "frequency": "c", "synset": "cart.n.01"}, {"name": "carton", "instance_count": 206, "def": "a container made of cardboard for holding food or drink", "synonyms": ["carton"], "image_count": 63, "id": 220, "frequency": "c", "synset": "carton.n.02"}, {"name": "cash_register", "instance_count": 33, "def": "a cashbox with an adding machine to register transactions", "synonyms": ["cash_register", "register_(for_cash_transactions)"], "image_count": 28, "id": 221, "frequency": "c", "synset": "cash_register.n.01"}, {"name": "casserole", "instance_count": 12, "def": "food cooked and served in a casserole", "synonyms": ["casserole"], "image_count": 5, "id": 222, "frequency": "r", "synset": "casserole.n.01"}, {"name": "cassette", "instance_count": 74, "def": "a container that holds a magnetic tape used for recording or playing sound or video", "synonyms": ["cassette"], "image_count": 7, "id": 223, "frequency": "r", "synset": "cassette.n.01"}, {"name": "cast", "instance_count": 15, "def": "bandage consisting of a firm covering that immobilizes broken bones while they heal", "synonyms": ["cast", "plaster_cast", "plaster_bandage"], "image_count": 14, "id": 224, "frequency": "c", "synset": "cast.n.05"}, {"name": "cat", "instance_count": 2387, "def": "a domestic house cat", "synonyms": ["cat"], "image_count": 1918, "id": 225, "frequency": "f", "synset": "cat.n.01"}, {"name": "cauliflower", "instance_count": 1035, "def": "edible compact head of white undeveloped flowers", "synonyms": ["cauliflower"], "image_count": 133, "id": 226, "frequency": "f", "synset": "cauliflower.n.02"}, {"name": "cayenne_(spice)", "instance_count": 49, "def": "ground pods and seeds of pungent red peppers of the genus Capsicum", "synonyms": ["cayenne_(spice)", "cayenne_pepper_(spice)", "red_pepper_(spice)"], "image_count": 16, "id": 227, "frequency": "c", "synset": "cayenne.n.02"}, {"name": "CD_player", "instance_count": 37, "def": "electronic equipment for playing compact discs (CDs)", "synonyms": ["CD_player"], "image_count": 27, "id": 228, "frequency": "c", "synset": "cd_player.n.01"}, {"name": "celery", "instance_count": 911, "def": "widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked", "synonyms": ["celery"], "image_count": 110, "id": 229, "frequency": "f", "synset": "celery.n.01"}, {"name": "cellular_telephone", "instance_count": 2902, "def": "a hand-held mobile telephone", "synonyms": ["cellular_telephone", "cellular_phone", "cellphone", "mobile_phone", "smart_phone"], "image_count": 1895, "id": 230, "frequency": "f", "synset": "cellular_telephone.n.01"}, {"name": "chain_mail", "instance_count": 13, "def": "(Middle Ages) flexible armor made of interlinked metal rings", "synonyms": ["chain_mail", "ring_mail", "chain_armor", "chain_armour", "ring_armor", "ring_armour"], "image_count": 4, "id": 231, "frequency": "r", "synset": "chain_mail.n.01"}, {"name": "chair", "instance_count": 11549, "def": "a seat for one person, with a support for the back", "synonyms": ["chair"], "image_count": 1927, "id": 232, "frequency": "f", "synset": "chair.n.01"}, {"name": "chaise_longue", "instance_count": 15, "def": "a long chair; for reclining", "synonyms": ["chaise_longue", "chaise", "daybed"], "image_count": 8, "id": 233, "frequency": "r", "synset": "chaise_longue.n.01"}, {"name": "chalice", "instance_count": 1, "def": "a bowl-shaped drinking vessel; especially the Eucharistic cup", "synonyms": ["chalice"], "image_count": 1, "id": 234, "frequency": "r", "synset": "chalice.n.01"}, {"name": "chandelier", "instance_count": 392, "def": "branched lighting fixture; often ornate; hangs from the ceiling", "synonyms": ["chandelier"], "image_count": 263, "id": 235, "frequency": "f", "synset": "chandelier.n.01"}, {"name": "chap", "instance_count": 19, "def": "leather leggings without a seat; worn over trousers by cowboys to protect their legs", "synonyms": ["chap"], "image_count": 10, "id": 236, "frequency": "r", "synset": "chap.n.04"}, {"name": "checkbook", "instance_count": 2, "def": "a book issued to holders of checking accounts", "synonyms": ["checkbook", "chequebook"], "image_count": 2, "id": 237, "frequency": "r", "synset": "checkbook.n.01"}, {"name": "checkerboard", "instance_count": 3, "def": "a board having 64 squares of two alternating colors", "synonyms": ["checkerboard"], "image_count": 3, "id": 238, "frequency": "r", "synset": "checkerboard.n.01"}, {"name": "cherry", "instance_count": 903, "def": "a red fruit with a single hard stone", "synonyms": ["cherry"], "image_count": 87, "id": 239, "frequency": "c", "synset": "cherry.n.03"}, {"name": "chessboard", "instance_count": 13, "def": "a checkerboard used to play chess", "synonyms": ["chessboard"], "image_count": 9, "id": 240, "frequency": "r", "synset": "chessboard.n.01"}, {"name": "chicken_(animal)", "instance_count": 417, "def": "a domestic fowl bred for flesh or eggs", "synonyms": ["chicken_(animal)"], "image_count": 71, "id": 241, "frequency": "c", "synset": "chicken.n.02"}, {"name": "chickpea", "instance_count": 265, "def": "the seed of the chickpea plant; usually dried", "synonyms": ["chickpea", "garbanzo"], "image_count": 13, "id": 242, "frequency": "c", "synset": "chickpea.n.01"}, {"name": "chili_(vegetable)", "instance_count": 354, "def": "very hot and finely tapering pepper of special pungency", "synonyms": ["chili_(vegetable)", "chili_pepper_(vegetable)", "chilli_(vegetable)", "chilly_(vegetable)", "chile_(vegetable)"], "image_count": 18, "id": 243, "frequency": "c", "synset": "chili.n.02"}, {"name": "chime", "instance_count": 2, "def": "an instrument consisting of a set of bells that are struck with a hammer", "synonyms": ["chime", "gong"], "image_count": 2, "id": 244, "frequency": "r", "synset": "chime.n.01"}, {"name": "chinaware", "instance_count": 41, "def": "dishware made of high quality porcelain", "synonyms": ["chinaware"], "image_count": 5, "id": 245, "frequency": "r", "synset": "chinaware.n.01"}, {"name": "crisp_(potato_chip)", "instance_count": 541, "def": "a thin crisp slice of potato fried in deep fat", "synonyms": ["crisp_(potato_chip)", "potato_chip"], "image_count": 45, "id": 246, "frequency": "c", "synset": "chip.n.04"}, {"name": "poker_chip", "instance_count": 21, "def": "a small disk-shaped counter used to represent money when gambling", "synonyms": ["poker_chip"], "image_count": 1, "id": 247, "frequency": "r", "synset": "chip.n.06"}, {"name": "chocolate_bar", "instance_count": 179, "def": "a bar of chocolate candy", "synonyms": ["chocolate_bar"], "image_count": 23, "id": 248, "frequency": "c", "synset": "chocolate_bar.n.01"}, {"name": "chocolate_cake", "instance_count": 80, "def": "cake containing chocolate", "synonyms": ["chocolate_cake"], "image_count": 32, "id": 249, "frequency": "c", "synset": "chocolate_cake.n.01"}, {"name": "chocolate_milk", "instance_count": 7, "def": "milk flavored with chocolate syrup", "synonyms": ["chocolate_milk"], "image_count": 4, "id": 250, "frequency": "r", "synset": "chocolate_milk.n.01"}, {"name": "chocolate_mousse", "instance_count": 1, "def": "dessert mousse made with chocolate", "synonyms": ["chocolate_mousse"], "image_count": 1, "id": 251, "frequency": "r", "synset": "chocolate_mousse.n.01"}, {"name": "choker", "instance_count": 1380, "def": "shirt collar, animal collar, or tight-fitting necklace", "synonyms": ["choker", "collar", "neckband"], "image_count": 858, "id": 252, "frequency": "f", "synset": "choker.n.03"}, {"name": "chopping_board", "instance_count": 840, "def": "a wooden board where meats or vegetables can be cut", "synonyms": ["chopping_board", "cutting_board", "chopping_block"], "image_count": 661, "id": 253, "frequency": "f", "synset": "chopping_board.n.01"}, {"name": "chopstick", "instance_count": 557, "def": "one of a pair of slender sticks used as oriental tableware to eat food with", "synonyms": ["chopstick"], "image_count": 168, "id": 254, "frequency": "f", "synset": "chopstick.n.01"}, {"name": "Christmas_tree", "instance_count": 303, "def": "an ornamented evergreen used as a Christmas decoration", "synonyms": ["Christmas_tree"], "image_count": 210, "id": 255, "frequency": "f", "synset": "christmas_tree.n.05"}, {"name": "slide", "instance_count": 106, "def": "sloping channel through which things can descend", "synonyms": ["slide"], "image_count": 65, "id": 256, "frequency": "c", "synset": "chute.n.02"}, {"name": "cider", "instance_count": 38, "def": "a beverage made from juice pressed from apples", "synonyms": ["cider", "cyder"], "image_count": 4, "id": 257, "frequency": "r", "synset": "cider.n.01"}, {"name": "cigar_box", "instance_count": 3, "def": "a box for holding cigars", "synonyms": ["cigar_box"], "image_count": 2, "id": 258, "frequency": "r", "synset": "cigar_box.n.01"}, {"name": "cigarette", "instance_count": 269, "def": "finely ground tobacco wrapped in paper; for smoking", "synonyms": ["cigarette"], "image_count": 159, "id": 259, "frequency": "f", "synset": "cigarette.n.01"}, {"name": "cigarette_case", "instance_count": 35, "def": "a small flat case for holding cigarettes", "synonyms": ["cigarette_case", "cigarette_pack"], "image_count": 31, "id": 260, "frequency": "c", "synset": "cigarette_case.n.01"}, {"name": "cistern", "instance_count": 901, "def": "a tank that holds the water used to flush a toilet", "synonyms": ["cistern", "water_tank"], "image_count": 811, "id": 261, "frequency": "f", "synset": "cistern.n.02"}, {"name": "clarinet", "instance_count": 1, "def": "a single-reed instrument with a straight tube", "synonyms": ["clarinet"], "image_count": 1, "id": 262, "frequency": "r", "synset": "clarinet.n.01"}, {"name": "clasp", "instance_count": 197, "def": "a fastener (as a buckle or hook) that is used to hold two things together", "synonyms": ["clasp"], "image_count": 42, "id": 263, "frequency": "c", "synset": "clasp.n.01"}, {"name": "cleansing_agent", "instance_count": 63, "def": "a preparation used in cleaning something", "synonyms": ["cleansing_agent", "cleanser", "cleaner"], "image_count": 27, "id": 264, "frequency": "c", "synset": "cleansing_agent.n.01"}, {"name": "cleat_(for_securing_rope)", "instance_count": 8, "def": "a fastener (usually with two projecting horns) around which a rope can be secured", "synonyms": ["cleat_(for_securing_rope)"], "image_count": 2, "id": 265, "frequency": "r", "synset": "cleat.n.02"}, {"name": "clementine", "instance_count": 108, "def": "a variety of mandarin orange", "synonyms": ["clementine"], "image_count": 5, "id": 266, "frequency": "r", "synset": "clementine.n.01"}, {"name": "clip", "instance_count": 301, "def": "any of various small fasteners used to hold loose articles together", "synonyms": ["clip"], "image_count": 95, "id": 267, "frequency": "c", "synset": "clip.n.03"}, {"name": "clipboard", "instance_count": 36, "def": "a small writing board with a clip at the top for holding papers", "synonyms": ["clipboard"], "image_count": 32, "id": 268, "frequency": "c", "synset": "clipboard.n.01"}, {"name": "clippers_(for_plants)", "instance_count": 1, "def": "shears for cutting grass or shrubbery (often used in the plural)", "synonyms": ["clippers_(for_plants)"], "image_count": 1, "id": 269, "frequency": "r", "synset": "clipper.n.03"}, {"name": "cloak", "instance_count": 1, "def": "a loose outer garment", "synonyms": ["cloak"], "image_count": 1, "id": 270, "frequency": "r", "synset": "cloak.n.02"}, {"name": "clock", "instance_count": 2677, "def": "a timepiece that shows the time of day", "synonyms": ["clock", "timepiece", "timekeeper"], "image_count": 1844, "id": 271, "frequency": "f", "synset": "clock.n.01"}, {"name": "clock_tower", "instance_count": 932, "def": "a tower with a large clock visible high up on an outside face", "synonyms": ["clock_tower"], "image_count": 897, "id": 272, "frequency": "f", "synset": "clock_tower.n.01"}, {"name": "clothes_hamper", "instance_count": 47, "def": "a hamper that holds dirty clothes to be washed or wet clothes to be dried", "synonyms": ["clothes_hamper", "laundry_basket", "clothes_basket"], "image_count": 31, "id": 273, "frequency": "c", "synset": "clothes_hamper.n.01"}, {"name": "clothespin", "instance_count": 111, "def": "wood or plastic fastener; for holding clothes on a clothesline", "synonyms": ["clothespin", "clothes_peg"], "image_count": 23, "id": 274, "frequency": "c", "synset": "clothespin.n.01"}, {"name": "clutch_bag", "instance_count": 1, "def": "a woman's strapless purse that is carried in the hand", "synonyms": ["clutch_bag"], "image_count": 1, "id": 275, "frequency": "r", "synset": "clutch_bag.n.01"}, {"name": "coaster", "instance_count": 390, "def": "a covering (plate or mat) that protects the surface of a table", "synonyms": ["coaster"], "image_count": 202, "id": 276, "frequency": "f", "synset": "coaster.n.03"}, {"name": "coat", "instance_count": 4145, "def": "an outer garment that has sleeves and covers the body from shoulder down", "synonyms": ["coat"], "image_count": 746, "id": 277, "frequency": "f", "synset": "coat.n.01"}, {"name": "coat_hanger", "instance_count": 282, "def": "a hanger that is shaped like a person's shoulders", "synonyms": ["coat_hanger", "clothes_hanger", "dress_hanger"], "image_count": 44, "id": 278, "frequency": "c", "synset": "coat_hanger.n.01"}, {"name": "coatrack", "instance_count": 16, "def": "a rack with hooks for temporarily holding coats and hats", "synonyms": ["coatrack", "hatrack"], "image_count": 14, "id": 279, "frequency": "c", "synset": "coatrack.n.01"}, {"name": "cock", "instance_count": 132, "def": "adult male chicken", "synonyms": ["cock", "rooster"], "image_count": 26, "id": 280, "frequency": "c", "synset": "cock.n.04"}, {"name": "cockroach", "instance_count": 1, "def": "any of numerous chiefly nocturnal insects; some are domestic pests", "synonyms": ["cockroach"], "image_count": 1, "id": 281, "frequency": "r", "synset": "cockroach.n.01"}, {"name": "cocoa_(beverage)", "instance_count": 4, "def": "a beverage made from cocoa powder and milk and sugar; usually drunk hot", "synonyms": ["cocoa_(beverage)", "hot_chocolate_(beverage)", "drinking_chocolate"], "image_count": 2, "id": 282, "frequency": "r", "synset": "cocoa.n.01"}, {"name": "coconut", "instance_count": 273, "def": "large hard-shelled brown oval nut with a fibrous husk", "synonyms": ["coconut", "cocoanut"], "image_count": 25, "id": 283, "frequency": "c", "synset": "coconut.n.02"}, {"name": "coffee_maker", "instance_count": 271, "def": "a kitchen appliance for brewing coffee automatically", "synonyms": ["coffee_maker", "coffee_machine"], "image_count": 238, "id": 284, "frequency": "f", "synset": "coffee_maker.n.01"}, {"name": "coffee_table", "instance_count": 709, "def": "low table where magazines can be placed and coffee or cocktails are served", "synonyms": ["coffee_table", "cocktail_table"], "image_count": 592, "id": 285, "frequency": "f", "synset": "coffee_table.n.01"}, {"name": "coffeepot", "instance_count": 32, "def": "tall pot in which coffee is brewed", "synonyms": ["coffeepot"], "image_count": 26, "id": 286, "frequency": "c", "synset": "coffeepot.n.01"}, {"name": "coil", "instance_count": 7, "def": "tubing that is wound in a spiral", "synonyms": ["coil"], "image_count": 5, "id": 287, "frequency": "r", "synset": "coil.n.05"}, {"name": "coin", "instance_count": 305, "def": "a flat metal piece (usually a disc) used as money", "synonyms": ["coin"], "image_count": 42, "id": 288, "frequency": "c", "synset": "coin.n.01"}, {"name": "colander", "instance_count": 16, "def": "bowl-shaped strainer; used to wash or drain foods", "synonyms": ["colander", "cullender"], "image_count": 13, "id": 289, "frequency": "c", "synset": "colander.n.01"}, {"name": "coleslaw", "instance_count": 72, "def": "basically shredded cabbage", "synonyms": ["coleslaw", "slaw"], "image_count": 46, "id": 290, "frequency": "c", "synset": "coleslaw.n.01"}, {"name": "coloring_material", "instance_count": 1, "def": "any material used for its color", "synonyms": ["coloring_material", "colouring_material"], "image_count": 1, "id": 291, "frequency": "r", "synset": "coloring_material.n.01"}, {"name": "combination_lock", "instance_count": 13, "def": "lock that can be opened only by turning dials in a special sequence", "synonyms": ["combination_lock"], "image_count": 8, "id": 292, "frequency": "r", "synset": "combination_lock.n.01"}, {"name": "pacifier", "instance_count": 40, "def": "device used for an infant to suck or bite on", "synonyms": ["pacifier", "teething_ring"], "image_count": 34, "id": 293, "frequency": "c", "synset": "comforter.n.04"}, {"name": "comic_book", "instance_count": 97, "def": "a magazine devoted to comic strips", "synonyms": ["comic_book"], "image_count": 5, "id": 294, "frequency": "r", "synset": "comic_book.n.01"}, {"name": "compass", "instance_count": 1, "def": "navigational instrument for finding directions", "synonyms": ["compass"], "image_count": 1, "id": 295, "frequency": "r", "synset": "compass.n.01"}, {"name": "computer_keyboard", "instance_count": 2745, "def": "a keyboard that is a data input device for computers", "synonyms": ["computer_keyboard", "keyboard_(computer)"], "image_count": 1871, "id": 296, "frequency": "f", "synset": "computer_keyboard.n.01"}, {"name": "condiment", "instance_count": 2985, "def": "a preparation (a sauce or relish or spice) to enhance flavor or enjoyment", "synonyms": ["condiment"], "image_count": 717, "id": 297, "frequency": "f", "synset": "condiment.n.01"}, {"name": "cone", "instance_count": 4081, "def": "a cone-shaped object used to direct traffic", "synonyms": ["cone", "traffic_cone"], "image_count": 1010, "id": 298, "frequency": "f", "synset": "cone.n.01"}, {"name": "control", "instance_count": 1775, "def": "a mechanism that controls the operation of a machine", "synonyms": ["control", "controller"], "image_count": 679, "id": 299, "frequency": "f", "synset": "control.n.09"}, {"name": "convertible_(automobile)", "instance_count": 4, "def": "a car that has top that can be folded or removed", "synonyms": ["convertible_(automobile)"], "image_count": 3, "id": 300, "frequency": "r", "synset": "convertible.n.01"}, {"name": "sofa_bed", "instance_count": 5, "def": "a sofa that can be converted into a bed", "synonyms": ["sofa_bed"], "image_count": 4, "id": 301, "frequency": "r", "synset": "convertible.n.03"}, {"name": "cooker", "instance_count": 1, "def": "a utensil for cooking", "synonyms": ["cooker"], "image_count": 1, "id": 302, "frequency": "r", "synset": "cooker.n.01"}, {"name": "cookie", "instance_count": 1920, "def": "any of various small flat sweet cakes (`biscuit' is the British term)", "synonyms": ["cookie", "cooky", "biscuit_(cookie)"], "image_count": 166, "id": 303, "frequency": "f", "synset": "cookie.n.01"}, {"name": "cooking_utensil", "instance_count": 18, "def": "a kitchen utensil made of material that does not melt easily; used for cooking", "synonyms": ["cooking_utensil"], "image_count": 2, "id": 304, "frequency": "r", "synset": "cooking_utensil.n.01"}, {"name": "cooler_(for_food)", "instance_count": 499, "def": "an insulated box for storing food often with ice", "synonyms": ["cooler_(for_food)", "ice_chest"], "image_count": 266, "id": 305, "frequency": "f", "synset": "cooler.n.01"}, {"name": "cork_(bottle_plug)", "instance_count": 326, "def": "the plug in the mouth of a bottle (especially a wine bottle)", "synonyms": ["cork_(bottle_plug)", "bottle_cork"], "image_count": 101, "id": 306, "frequency": "f", "synset": "cork.n.04"}, {"name": "corkboard", "instance_count": 7, "def": "a sheet consisting of cork granules", "synonyms": ["corkboard"], "image_count": 6, "id": 307, "frequency": "r", "synset": "corkboard.n.01"}, {"name": "corkscrew", "instance_count": 15, "def": "a bottle opener that pulls corks", "synonyms": ["corkscrew", "bottle_screw"], "image_count": 14, "id": 308, "frequency": "c", "synset": "corkscrew.n.01"}, {"name": "edible_corn", "instance_count": 1883, "def": "ears or kernels of corn that can be prepared and served for human food (only mark individual ears or kernels)", "synonyms": ["edible_corn", "corn", "maize"], "image_count": 133, "id": 309, "frequency": "f", "synset": "corn.n.03"}, {"name": "cornbread", "instance_count": 10, "def": "bread made primarily of cornmeal", "synonyms": ["cornbread"], "image_count": 2, "id": 310, "frequency": "r", "synset": "cornbread.n.01"}, {"name": "cornet", "instance_count": 65, "def": "a brass musical instrument with a narrow tube and a flared bell and many valves", "synonyms": ["cornet", "horn", "trumpet"], "image_count": 38, "id": 311, "frequency": "c", "synset": "cornet.n.01"}, {"name": "cornice", "instance_count": 149, "def": "a decorative framework to conceal curtain fixtures at the top of a window casing", "synonyms": ["cornice", "valance", "valance_board", "pelmet"], "image_count": 95, "id": 312, "frequency": "c", "synset": "cornice.n.01"}, {"name": "cornmeal", "instance_count": 1, "def": "coarsely ground corn", "synonyms": ["cornmeal"], "image_count": 1, "id": 313, "frequency": "r", "synset": "cornmeal.n.01"}, {"name": "corset", "instance_count": 12, "def": "a woman's close-fitting foundation garment", "synonyms": ["corset", "girdle"], "image_count": 12, "id": 314, "frequency": "c", "synset": "corset.n.01"}, {"name": "costume", "instance_count": 124, "def": "the attire characteristic of a country or a time or a social class", "synonyms": ["costume"], "image_count": 49, "id": 315, "frequency": "c", "synset": "costume.n.04"}, {"name": "cougar", "instance_count": 6, "def": "large American feline resembling a lion", "synonyms": ["cougar", "puma", "catamount", "mountain_lion", "panther"], "image_count": 5, "id": 316, "frequency": "r", "synset": "cougar.n.01"}, {"name": "coverall", "instance_count": 12, "def": "a loose-fitting protective garment that is worn over other clothing", "synonyms": ["coverall"], "image_count": 5, "id": 317, "frequency": "r", "synset": "coverall.n.01"}, {"name": "cowbell", "instance_count": 29, "def": "a bell hung around the neck of cow so that the cow can be easily located", "synonyms": ["cowbell"], "image_count": 16, "id": 318, "frequency": "c", "synset": "cowbell.n.01"}, {"name": "cowboy_hat", "instance_count": 535, "def": "a hat with a wide brim and a soft crown; worn by American ranch hands", "synonyms": ["cowboy_hat", "ten-gallon_hat"], "image_count": 216, "id": 319, "frequency": "f", "synset": "cowboy_hat.n.01"}, {"name": "crab_(animal)", "instance_count": 50, "def": "decapod having eyes on short stalks and a broad flattened shell and pincers", "synonyms": ["crab_(animal)"], "image_count": 12, "id": 320, "frequency": "c", "synset": "crab.n.01"}, {"name": "crabmeat", "instance_count": 5, "def": "the edible flesh of any of various crabs", "synonyms": ["crabmeat"], "image_count": 1, "id": 321, "frequency": "r", "synset": "crab.n.05"}, {"name": "cracker", "instance_count": 510, "def": "a thin crisp wafer", "synonyms": ["cracker"], "image_count": 54, "id": 322, "frequency": "c", "synset": "cracker.n.01"}, {"name": "crape", "instance_count": 12, "def": "small very thin pancake", "synonyms": ["crape", "crepe", "French_pancake"], "image_count": 5, "id": 323, "frequency": "r", "synset": "crape.n.01"}, {"name": "crate", "instance_count": 1832, "def": "a rugged box (usually made of wood); used for shipping", "synonyms": ["crate"], "image_count": 245, "id": 324, "frequency": "f", "synset": "crate.n.01"}, {"name": "crayon", "instance_count": 59, "def": "writing or drawing implement made of a colored stick of composition wax", "synonyms": ["crayon", "wax_crayon"], "image_count": 12, "id": 325, "frequency": "c", "synset": "crayon.n.01"}, {"name": "cream_pitcher", "instance_count": 10, "def": "a small pitcher for serving cream", "synonyms": ["cream_pitcher"], "image_count": 7, "id": 326, "frequency": "r", "synset": "cream_pitcher.n.01"}, {"name": "crescent_roll", "instance_count": 152, "def": "very rich flaky crescent-shaped roll", "synonyms": ["crescent_roll", "croissant"], "image_count": 35, "id": 327, "frequency": "c", "synset": "crescent_roll.n.01"}, {"name": "crib", "instance_count": 40, "def": "baby bed with high sides made of slats", "synonyms": ["crib", "cot"], "image_count": 36, "id": 328, "frequency": "c", "synset": "crib.n.01"}, {"name": "crock_pot", "instance_count": 128, "def": "an earthen jar (made of baked clay) or a modern electric crockpot", "synonyms": ["crock_pot", "earthenware_jar"], "image_count": 32, "id": 329, "frequency": "c", "synset": "crock.n.03"}, {"name": "crossbar", "instance_count": 6991, "def": "a horizontal bar that goes across something", "synonyms": ["crossbar"], "image_count": 1027, "id": 330, "frequency": "f", "synset": "crossbar.n.01"}, {"name": "crouton", "instance_count": 140, "def": "a small piece of toasted or fried bread; served in soup or salads", "synonyms": ["crouton"], "image_count": 10, "id": 331, "frequency": "r", "synset": "crouton.n.01"}, {"name": "crow", "instance_count": 24, "def": "black birds having a raucous call", "synonyms": ["crow"], "image_count": 12, "id": 332, "frequency": "c", "synset": "crow.n.01"}, {"name": "crowbar", "instance_count": 1, "def": "a heavy iron lever with one end forged into a wedge", "synonyms": ["crowbar", "wrecking_bar", "pry_bar"], "image_count": 1, "id": 333, "frequency": "r", "synset": "crowbar.n.01"}, {"name": "crown", "instance_count": 126, "def": "an ornamental jeweled headdress signifying sovereignty", "synonyms": ["crown"], "image_count": 67, "id": 334, "frequency": "c", "synset": "crown.n.04"}, {"name": "crucifix", "instance_count": 99, "def": "representation of the cross on which Jesus died", "synonyms": ["crucifix"], "image_count": 71, "id": 335, "frequency": "c", "synset": "crucifix.n.01"}, {"name": "cruise_ship", "instance_count": 35, "def": "a passenger ship used commercially for pleasure cruises", "synonyms": ["cruise_ship", "cruise_liner"], "image_count": 30, "id": 336, "frequency": "c", "synset": "cruise_ship.n.01"}, {"name": "police_cruiser", "instance_count": 86, "def": "a car in which policemen cruise the streets", "synonyms": ["police_cruiser", "patrol_car", "police_car", "squad_car"], "image_count": 48, "id": 337, "frequency": "c", "synset": "cruiser.n.01"}, {"name": "crumb", "instance_count": 3021, "def": "small piece of e.g. bread or cake", "synonyms": ["crumb"], "image_count": 249, "id": 338, "frequency": "f", "synset": "crumb.n.03"}, {"name": "crutch", "instance_count": 20, "def": "a wooden or metal staff that fits under the armpit and reaches to the ground", "synonyms": ["crutch"], "image_count": 13, "id": 339, "frequency": "c", "synset": "crutch.n.01"}, {"name": "cub_(animal)", "instance_count": 55, "def": "the young of certain carnivorous mammals such as the bear or wolf or lion", "synonyms": ["cub_(animal)"], "image_count": 29, "id": 340, "frequency": "c", "synset": "cub.n.03"}, {"name": "cube", "instance_count": 189, "def": "a block in the (approximate) shape of a cube", "synonyms": ["cube", "square_block"], "image_count": 14, "id": 341, "frequency": "c", "synset": "cube.n.05"}, {"name": "cucumber", "instance_count": 1533, "def": "cylindrical green fruit with thin green rind and white flesh eaten as a vegetable", "synonyms": ["cucumber", "cuke"], "image_count": 236, "id": 342, "frequency": "f", "synset": "cucumber.n.02"}, {"name": "cufflink", "instance_count": 17, "def": "jewelry consisting of linked buttons used to fasten the cuffs of a shirt", "synonyms": ["cufflink"], "image_count": 15, "id": 343, "frequency": "c", "synset": "cufflink.n.01"}, {"name": "cup", "instance_count": 4637, "def": "a small open container usually used for drinking; usually has a handle", "synonyms": ["cup"], "image_count": 1521, "id": 344, "frequency": "f", "synset": "cup.n.01"}, {"name": "trophy_cup", "instance_count": 80, "def": "a metal award or cup-shaped vessel with handles that is awarded as a trophy to a competition winner", "synonyms": ["trophy_cup"], "image_count": 25, "id": 345, "frequency": "c", "synset": "cup.n.08"}, {"name": "cupboard", "instance_count": 1623, "def": "a small room (or recess) or cabinet used for storage space", "synonyms": ["cupboard", "closet"], "image_count": 249, "id": 346, "frequency": "f", "synset": "cupboard.n.01"}, {"name": "cupcake", "instance_count": 1628, "def": "small cake baked in a muffin tin", "synonyms": ["cupcake"], "image_count": 139, "id": 347, "frequency": "f", "synset": "cupcake.n.01"}, {"name": "hair_curler", "instance_count": 20, "def": "a cylindrical tube around which the hair is wound to curl it", "synonyms": ["hair_curler", "hair_roller", "hair_crimper"], "image_count": 2, "id": 348, "frequency": "r", "synset": "curler.n.01"}, {"name": "curling_iron", "instance_count": 2, "def": "a cylindrical home appliance that heats hair that has been curled around it", "synonyms": ["curling_iron"], "image_count": 2, "id": 349, "frequency": "r", "synset": "curling_iron.n.01"}, {"name": "curtain", "instance_count": 4506, "def": "hanging cloth used as a blind (especially for a window)", "synonyms": ["curtain", "drapery"], "image_count": 1890, "id": 350, "frequency": "f", "synset": "curtain.n.01"}, {"name": "cushion", "instance_count": 7174, "def": "a soft bag filled with air or padding such as feathers or foam rubber", "synonyms": ["cushion"], "image_count": 1240, "id": 351, "frequency": "f", "synset": "cushion.n.03"}, {"name": "cylinder", "instance_count": 3, "def": "a cylindrical container", "synonyms": ["cylinder"], "image_count": 1, "id": 352, "frequency": "r", "synset": "cylinder.n.04"}, {"name": "cymbal", "instance_count": 24, "def": "a percussion instrument consisting of a concave brass disk", "synonyms": ["cymbal"], "image_count": 9, "id": 353, "frequency": "r", "synset": "cymbal.n.01"}, {"name": "dagger", "instance_count": 1, "def": "a short knife with a pointed blade used for piercing or stabbing", "synonyms": ["dagger"], "image_count": 1, "id": 354, "frequency": "r", "synset": "dagger.n.01"}, {"name": "dalmatian", "instance_count": 3, "def": "a large breed having a smooth white coat with black or brown spots", "synonyms": ["dalmatian"], "image_count": 3, "id": 355, "frequency": "r", "synset": "dalmatian.n.02"}, {"name": "dartboard", "instance_count": 11, "def": "a circular board of wood or cork used as the target in the game of darts", "synonyms": ["dartboard"], "image_count": 11, "id": 356, "frequency": "c", "synset": "dartboard.n.01"}, {"name": "date_(fruit)", "instance_count": 103, "def": "sweet edible fruit of the date palm with a single long woody seed", "synonyms": ["date_(fruit)"], "image_count": 4, "id": 357, "frequency": "r", "synset": "date.n.08"}, {"name": "deck_chair", "instance_count": 1787, "def": "a folding chair for use outdoors; a wooden frame supports a length of canvas", "synonyms": ["deck_chair", "beach_chair"], "image_count": 236, "id": 358, "frequency": "f", "synset": "deck_chair.n.01"}, {"name": "deer", "instance_count": 130, "def": "distinguished from Bovidae by the male's having solid deciduous antlers", "synonyms": ["deer", "cervid"], "image_count": 44, "id": 359, "frequency": "c", "synset": "deer.n.01"}, {"name": "dental_floss", "instance_count": 20, "def": "a soft thread for cleaning the spaces between the teeth", "synonyms": ["dental_floss", "floss"], "image_count": 19, "id": 360, "frequency": "c", "synset": "dental_floss.n.01"}, {"name": "desk", "instance_count": 1662, "def": "a piece of furniture with a writing surface and usually drawers or other compartments", "synonyms": ["desk"], "image_count": 1100, "id": 361, "frequency": "f", "synset": "desk.n.01"}, {"name": "detergent", "instance_count": 11, "def": "a surface-active chemical widely used in industry and laundering", "synonyms": ["detergent"], "image_count": 7, "id": 362, "frequency": "r", "synset": "detergent.n.01"}, {"name": "diaper", "instance_count": 89, "def": "garment consisting of a folded cloth drawn up between the legs and fastened at the waist", "synonyms": ["diaper"], "image_count": 69, "id": 363, "frequency": "c", "synset": "diaper.n.01"}, {"name": "diary", "instance_count": 2, "def": "yearly planner book", "synonyms": ["diary", "journal"], "image_count": 2, "id": 364, "frequency": "r", "synset": "diary.n.01"}, {"name": "die", "instance_count": 25, "def": "a small cube with 1 to 6 spots on the six faces; used in gambling", "synonyms": ["die", "dice"], "image_count": 8, "id": 365, "frequency": "r", "synset": "die.n.01"}, {"name": "dinghy", "instance_count": 15, "def": "a small boat of shallow draft with seats and oars with which it is propelled", "synonyms": ["dinghy", "dory", "rowboat"], "image_count": 5, "id": 366, "frequency": "r", "synset": "dinghy.n.01"}, {"name": "dining_table", "instance_count": 312, "def": "a table at which meals are served", "synonyms": ["dining_table"], "image_count": 227, "id": 367, "frequency": "f", "synset": "dining_table.n.01"}, {"name": "tux", "instance_count": 10, "def": "semiformal evening dress for men", "synonyms": ["tux", "tuxedo"], "image_count": 6, "id": 368, "frequency": "r", "synset": "dinner_jacket.n.01"}, {"name": "dish", "instance_count": 532, "def": "a piece of dishware normally used as a container for holding or serving food", "synonyms": ["dish"], "image_count": 106, "id": 369, "frequency": "f", "synset": "dish.n.01"}, {"name": "dish_antenna", "instance_count": 153, "def": "directional antenna consisting of a parabolic reflector", "synonyms": ["dish_antenna"], "image_count": 81, "id": 370, "frequency": "c", "synset": "dish.n.05"}, {"name": "dishrag", "instance_count": 32, "def": "a cloth for washing dishes or cleaning in general", "synonyms": ["dishrag", "dishcloth"], "image_count": 17, "id": 371, "frequency": "c", "synset": "dishrag.n.01"}, {"name": "dishtowel", "instance_count": 223, "def": "a towel for drying dishes", "synonyms": ["dishtowel", "tea_towel"], "image_count": 134, "id": 372, "frequency": "f", "synset": "dishtowel.n.01"}, {"name": "dishwasher", "instance_count": 317, "def": "a machine for washing dishes", "synonyms": ["dishwasher", "dishwashing_machine"], "image_count": 312, "id": 373, "frequency": "f", "synset": "dishwasher.n.01"}, {"name": "dishwasher_detergent", "instance_count": 9, "def": "dishsoap or dish detergent designed for use in dishwashers", "synonyms": ["dishwasher_detergent", "dishwashing_detergent", "dishwashing_liquid", "dishsoap"], "image_count": 8, "id": 374, "frequency": "r", "synset": "dishwasher_detergent.n.01"}, {"name": "dispenser", "instance_count": 610, "def": "a container so designed that the contents can be used in prescribed amounts", "synonyms": ["dispenser"], "image_count": 271, "id": 375, "frequency": "f", "synset": "dispenser.n.01"}, {"name": "diving_board", "instance_count": 2, "def": "a springboard from which swimmers can dive", "synonyms": ["diving_board"], "image_count": 2, "id": 376, "frequency": "r", "synset": "diving_board.n.01"}, {"name": "Dixie_cup", "instance_count": 352, "def": "a disposable cup made of paper; for holding drinks", "synonyms": ["Dixie_cup", "paper_cup"], "image_count": 103, "id": 377, "frequency": "f", "synset": "dixie_cup.n.01"}, {"name": "dog", "instance_count": 2684, "def": "a common domesticated dog", "synonyms": ["dog"], "image_count": 1938, "id": 378, "frequency": "f", "synset": "dog.n.01"}, {"name": "dog_collar", "instance_count": 733, "def": "a collar for a dog", "synonyms": ["dog_collar"], "image_count": 574, "id": 379, "frequency": "f", "synset": "dog_collar.n.01"}, {"name": "doll", "instance_count": 398, "def": "a toy replica of a HUMAN (NOT AN ANIMAL)", "synonyms": ["doll"], "image_count": 120, "id": 380, "frequency": "f", "synset": "doll.n.01"}, {"name": "dollar", "instance_count": 2, "def": "a piece of paper money worth one dollar", "synonyms": ["dollar", "dollar_bill", "one_dollar_bill"], "image_count": 2, "id": 381, "frequency": "r", "synset": "dollar.n.02"}, {"name": "dollhouse", "instance_count": 2, "def": "a house so small that it is likened to a child's plaything", "synonyms": ["dollhouse", "doll's_house"], "image_count": 2, "id": 382, "frequency": "r", "synset": "dollhouse.n.01"}, {"name": "dolphin", "instance_count": 38, "def": "any of various small toothed whales with a beaklike snout; larger than porpoises", "synonyms": ["dolphin"], "image_count": 13, "id": 383, "frequency": "c", "synset": "dolphin.n.02"}, {"name": "domestic_ass", "instance_count": 49, "def": "domestic beast of burden descended from the African wild ass; patient but stubborn", "synonyms": ["domestic_ass", "donkey"], "image_count": 29, "id": 384, "frequency": "c", "synset": "domestic_ass.n.01"}, {"name": "doorknob", "instance_count": 4072, "def": "a knob used to open a door (often called `doorhandle' in Great Britain)", "synonyms": ["doorknob", "doorhandle"], "image_count": 1710, "id": 385, "frequency": "f", "synset": "doorknob.n.01"}, {"name": "doormat", "instance_count": 78, "def": "a mat placed outside an exterior door for wiping the shoes before entering", "synonyms": ["doormat", "welcome_mat"], "image_count": 66, "id": 386, "frequency": "c", "synset": "doormat.n.02"}, {"name": "doughnut", "instance_count": 11911, "def": "a small ring-shaped friedcake", "synonyms": ["doughnut", "donut"], "image_count": 1008, "id": 387, "frequency": "f", "synset": "doughnut.n.02"}, {"name": "dove", "instance_count": 2, "def": "any of numerous small pigeons", "synonyms": ["dove"], "image_count": 1, "id": 388, "frequency": "r", "synset": "dove.n.01"}, {"name": "dragonfly", "instance_count": 8, "def": "slender-bodied non-stinging insect having iridescent wings that are outspread at rest", "synonyms": ["dragonfly"], "image_count": 3, "id": 389, "frequency": "r", "synset": "dragonfly.n.01"}, {"name": "drawer", "instance_count": 7927, "def": "a boxlike container in a piece of furniture; made so as to slide in and out", "synonyms": ["drawer"], "image_count": 1942, "id": 390, "frequency": "f", "synset": "drawer.n.01"}, {"name": "underdrawers", "instance_count": 23, "def": "underpants worn by men", "synonyms": ["underdrawers", "boxers", "boxershorts"], "image_count": 19, "id": 391, "frequency": "c", "synset": "drawers.n.01"}, {"name": "dress", "instance_count": 2842, "def": "a one-piece garment for a woman; has skirt and bodice", "synonyms": ["dress", "frock"], "image_count": 1488, "id": 392, "frequency": "f", "synset": "dress.n.01"}, {"name": "dress_hat", "instance_count": 76, "def": "a man's hat with a tall crown; usually covered with silk or with beaver fur", "synonyms": ["dress_hat", "high_hat", "opera_hat", "silk_hat", "top_hat"], "image_count": 46, "id": 393, "frequency": "c", "synset": "dress_hat.n.01"}, {"name": "dress_suit", "instance_count": 306, "def": "formalwear consisting of full evening dress for men", "synonyms": ["dress_suit"], "image_count": 106, "id": 394, "frequency": "f", "synset": "dress_suit.n.01"}, {"name": "dresser", "instance_count": 152, "def": "a cabinet with shelves", "synonyms": ["dresser"], "image_count": 115, "id": 395, "frequency": "f", "synset": "dresser.n.05"}, {"name": "drill", "instance_count": 24, "def": "a tool with a sharp rotating point for making holes in hard materials", "synonyms": ["drill"], "image_count": 19, "id": 396, "frequency": "c", "synset": "drill.n.01"}, {"name": "drone", "instance_count": 2, "def": "an aircraft without a pilot that is operated by remote control", "synonyms": ["drone"], "image_count": 2, "id": 397, "frequency": "r", "synset": "drone.n.04"}, {"name": "dropper", "instance_count": 1, "def": "pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time", "synonyms": ["dropper", "eye_dropper"], "image_count": 1, "id": 398, "frequency": "r", "synset": "dropper.n.01"}, {"name": "drum_(musical_instrument)", "instance_count": 59, "def": "a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end", "synonyms": ["drum_(musical_instrument)"], "image_count": 28, "id": 399, "frequency": "c", "synset": "drum.n.01"}, {"name": "drumstick", "instance_count": 25, "def": "a stick used for playing a drum", "synonyms": ["drumstick"], "image_count": 9, "id": 400, "frequency": "r", "synset": "drumstick.n.02"}, {"name": "duck", "instance_count": 1090, "def": "small web-footed broad-billed swimming bird", "synonyms": ["duck"], "image_count": 192, "id": 401, "frequency": "f", "synset": "duck.n.01"}, {"name": "duckling", "instance_count": 36, "def": "young duck", "synonyms": ["duckling"], "image_count": 12, "id": 402, "frequency": "c", "synset": "duckling.n.02"}, {"name": "duct_tape", "instance_count": 77, "def": "a wide silvery adhesive tape", "synonyms": ["duct_tape"], "image_count": 21, "id": 403, "frequency": "c", "synset": "duct_tape.n.01"}, {"name": "duffel_bag", "instance_count": 666, "def": "a large cylindrical bag of heavy cloth (does not include suitcases)", "synonyms": ["duffel_bag", "duffle_bag", "duffel", "duffle"], "image_count": 247, "id": 404, "frequency": "f", "synset": "duffel_bag.n.01"}, {"name": "dumbbell", "instance_count": 13, "def": "an exercising weight with two ball-like ends connected by a short handle", "synonyms": ["dumbbell"], "image_count": 6, "id": 405, "frequency": "r", "synset": "dumbbell.n.01"}, {"name": "dumpster", "instance_count": 95, "def": "a container designed to receive and transport and dump waste", "synonyms": ["dumpster"], "image_count": 64, "id": 406, "frequency": "c", "synset": "dumpster.n.01"}, {"name": "dustpan", "instance_count": 7, "def": "a short-handled receptacle into which dust can be swept", "synonyms": ["dustpan"], "image_count": 7, "id": 407, "frequency": "r", "synset": "dustpan.n.02"}, {"name": "eagle", "instance_count": 48, "def": "large birds of prey noted for their broad wings and strong soaring flight", "synonyms": ["eagle"], "image_count": 40, "id": 408, "frequency": "c", "synset": "eagle.n.01"}, {"name": "earphone", "instance_count": 767, "def": "device for listening to audio that is held over or inserted into the ear", "synonyms": ["earphone", "earpiece", "headphone"], "image_count": 542, "id": 409, "frequency": "f", "synset": "earphone.n.01"}, {"name": "earplug", "instance_count": 39, "def": "a soft plug that is inserted into the ear canal to block sound", "synonyms": ["earplug"], "image_count": 2, "id": 410, "frequency": "r", "synset": "earplug.n.01"}, {"name": "earring", "instance_count": 3070, "def": "jewelry to ornament the ear", "synonyms": ["earring"], "image_count": 1898, "id": 411, "frequency": "f", "synset": "earring.n.01"}, {"name": "easel", "instance_count": 43, "def": "an upright tripod for displaying something (usually an artist's canvas)", "synonyms": ["easel"], "image_count": 36, "id": 412, "frequency": "c", "synset": "easel.n.01"}, {"name": "eclair", "instance_count": 39, "def": "oblong cream puff", "synonyms": ["eclair"], "image_count": 4, "id": 413, "frequency": "r", "synset": "eclair.n.01"}, {"name": "eel", "instance_count": 1, "def": "an elongate fish with fatty flesh", "synonyms": ["eel"], "image_count": 1, "id": 414, "frequency": "r", "synset": "eel.n.01"}, {"name": "egg", "instance_count": 813, "def": "oval reproductive body of a fowl (especially a hen) used as food", "synonyms": ["egg", "eggs"], "image_count": 191, "id": 415, "frequency": "f", "synset": "egg.n.02"}, {"name": "egg_roll", "instance_count": 15, "def": "minced vegetables and meat wrapped in a pancake and fried", "synonyms": ["egg_roll", "spring_roll"], "image_count": 6, "id": 416, "frequency": "r", "synset": "egg_roll.n.01"}, {"name": "egg_yolk", "instance_count": 90, "def": "the yellow spherical part of an egg", "synonyms": ["egg_yolk", "yolk_(egg)"], "image_count": 41, "id": 417, "frequency": "c", "synset": "egg_yolk.n.01"}, {"name": "eggbeater", "instance_count": 52, "def": "a mixer for beating eggs or whipping cream", "synonyms": ["eggbeater", "eggwhisk"], "image_count": 39, "id": 418, "frequency": "c", "synset": "eggbeater.n.02"}, {"name": "eggplant", "instance_count": 337, "def": "egg-shaped vegetable having a shiny skin typically dark purple", "synonyms": ["eggplant", "aubergine"], "image_count": 46, "id": 419, "frequency": "c", "synset": "eggplant.n.01"}, {"name": "electric_chair", "instance_count": 1, "def": "a chair-shaped instrument of execution by electrocution", "synonyms": ["electric_chair"], "image_count": 1, "id": 420, "frequency": "r", "synset": "electric_chair.n.01"}, {"name": "refrigerator", "instance_count": 1702, "def": "a refrigerator in which the coolant is pumped around by an electric motor", "synonyms": ["refrigerator"], "image_count": 1451, "id": 421, "frequency": "f", "synset": "electric_refrigerator.n.01"}, {"name": "elephant", "instance_count": 5325, "def": "a common elephant", "synonyms": ["elephant"], "image_count": 1878, "id": 422, "frequency": "f", "synset": "elephant.n.01"}, {"name": "elk", "instance_count": 29, "def": "large northern deer with enormous flattened antlers in the male", "synonyms": ["elk", "moose"], "image_count": 11, "id": 423, "frequency": "c", "synset": "elk.n.01"}, {"name": "envelope", "instance_count": 210, "def": "a flat (usually rectangular) container for a letter, thin package, etc.", "synonyms": ["envelope"], "image_count": 82, "id": 424, "frequency": "c", "synset": "envelope.n.01"}, {"name": "eraser", "instance_count": 41, "def": "an implement used to erase something", "synonyms": ["eraser"], "image_count": 18, "id": 425, "frequency": "c", "synset": "eraser.n.01"}, {"name": "escargot", "instance_count": 5, "def": "edible snail usually served in the shell with a sauce of melted butter and garlic", "synonyms": ["escargot"], "image_count": 1, "id": 426, "frequency": "r", "synset": "escargot.n.01"}, {"name": "eyepatch", "instance_count": 9, "def": "a protective cloth covering for an injured eye", "synonyms": ["eyepatch"], "image_count": 7, "id": 427, "frequency": "r", "synset": "eyepatch.n.01"}, {"name": "falcon", "instance_count": 3, "def": "birds of prey having long pointed powerful wings adapted for swift flight", "synonyms": ["falcon"], "image_count": 3, "id": 428, "frequency": "r", "synset": "falcon.n.01"}, {"name": "fan", "instance_count": 737, "def": "a device for creating a current of air by movement of a surface or surfaces", "synonyms": ["fan"], "image_count": 575, "id": 429, "frequency": "f", "synset": "fan.n.01"}, {"name": "faucet", "instance_count": 3185, "def": "a regulator for controlling the flow of a liquid from a reservoir", "synonyms": ["faucet", "spigot", "tap"], "image_count": 1907, "id": 430, "frequency": "f", "synset": "faucet.n.01"}, {"name": "fedora", "instance_count": 14, "def": "a hat made of felt with a creased crown", "synonyms": ["fedora"], "image_count": 8, "id": 431, "frequency": "r", "synset": "fedora.n.01"}, {"name": "ferret", "instance_count": 5, "def": "domesticated albino variety of the European polecat bred for hunting rats and rabbits", "synonyms": ["ferret"], "image_count": 4, "id": 432, "frequency": "r", "synset": "ferret.n.02"}, {"name": "Ferris_wheel", "instance_count": 32, "def": "a large wheel with suspended seats that remain upright as the wheel rotates", "synonyms": ["Ferris_wheel"], "image_count": 32, "id": 433, "frequency": "c", "synset": "ferris_wheel.n.01"}, {"name": "ferry", "instance_count": 17, "def": "a boat that transports people or vehicles across a body of water and operates on a regular schedule", "synonyms": ["ferry", "ferryboat"], "image_count": 11, "id": 434, "frequency": "c", "synset": "ferry.n.01"}, {"name": "fig_(fruit)", "instance_count": 147, "def": "fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried", "synonyms": ["fig_(fruit)"], "image_count": 4, "id": 435, "frequency": "r", "synset": "fig.n.04"}, {"name": "fighter_jet", "instance_count": 115, "def": "a high-speed military or naval airplane designed to destroy enemy targets", "synonyms": ["fighter_jet", "fighter_aircraft", "attack_aircraft"], "image_count": 54, "id": 436, "frequency": "c", "synset": "fighter.n.02"}, {"name": "figurine", "instance_count": 1056, "def": "a small carved or molded figure", "synonyms": ["figurine"], "image_count": 202, "id": 437, "frequency": "f", "synset": "figurine.n.01"}, {"name": "file_cabinet", "instance_count": 53, "def": "office furniture consisting of a container for keeping papers in order", "synonyms": ["file_cabinet", "filing_cabinet"], "image_count": 32, "id": 438, "frequency": "c", "synset": "file.n.03"}, {"name": "file_(tool)", "instance_count": 3, "def": "a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal", "synonyms": ["file_(tool)"], "image_count": 3, "id": 439, "frequency": "r", "synset": "file.n.04"}, {"name": "fire_alarm", "instance_count": 151, "def": "an alarm that is tripped off by fire or smoke", "synonyms": ["fire_alarm", "smoke_alarm"], "image_count": 130, "id": 440, "frequency": "f", "synset": "fire_alarm.n.02"}, {"name": "fire_engine", "instance_count": 179, "def": "large trucks that carry firefighters and equipment to the site of a fire", "synonyms": ["fire_engine", "fire_truck"], "image_count": 119, "id": 441, "frequency": "f", "synset": "fire_engine.n.01"}, {"name": "fire_extinguisher", "instance_count": 165, "def": "a manually operated device for extinguishing small fires", "synonyms": ["fire_extinguisher", "extinguisher"], "image_count": 141, "id": 442, "frequency": "f", "synset": "fire_extinguisher.n.01"}, {"name": "fire_hose", "instance_count": 67, "def": "a large hose that carries water from a fire hydrant to the site of the fire", "synonyms": ["fire_hose"], "image_count": 29, "id": 443, "frequency": "c", "synset": "fire_hose.n.01"}, {"name": "fireplace", "instance_count": 530, "def": "an open recess in a wall at the base of a chimney where a fire can be built", "synonyms": ["fireplace"], "image_count": 525, "id": 444, "frequency": "f", "synset": "fireplace.n.01"}, {"name": "fireplug", "instance_count": 1458, "def": "an upright hydrant for drawing water to use in fighting a fire", "synonyms": ["fireplug", "fire_hydrant", "hydrant"], "image_count": 1323, "id": 445, "frequency": "f", "synset": "fireplug.n.01"}, {"name": "first-aid_kit", "instance_count": 2, "def": "kit consisting of a set of bandages and medicines for giving first aid", "synonyms": ["first-aid_kit"], "image_count": 2, "id": 446, "frequency": "r", "synset": "first-aid_kit.n.01"}, {"name": "fish", "instance_count": 525, "def": "any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills", "synonyms": ["fish"], "image_count": 113, "id": 447, "frequency": "f", "synset": "fish.n.01"}, {"name": "fish_(food)", "instance_count": 96, "def": "the flesh of fish used as food", "synonyms": ["fish_(food)"], "image_count": 16, "id": 448, "frequency": "c", "synset": "fish.n.02"}, {"name": "fishbowl", "instance_count": 33, "def": "a transparent bowl in which small fish are kept", "synonyms": ["fishbowl", "goldfish_bowl"], "image_count": 7, "id": 449, "frequency": "r", "synset": "fishbowl.n.02"}, {"name": "fishing_rod", "instance_count": 84, "def": "a rod that is used in fishing to extend the fishing line", "synonyms": ["fishing_rod", "fishing_pole"], "image_count": 35, "id": 450, "frequency": "c", "synset": "fishing_rod.n.01"}, {"name": "flag", "instance_count": 7007, "def": "emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)", "synonyms": ["flag"], "image_count": 1908, "id": 451, "frequency": "f", "synset": "flag.n.01"}, {"name": "flagpole", "instance_count": 1082, "def": "a tall staff or pole on which a flag is raised", "synonyms": ["flagpole", "flagstaff"], "image_count": 353, "id": 452, "frequency": "f", "synset": "flagpole.n.02"}, {"name": "flamingo", "instance_count": 309, "def": "large pink web-footed bird with down-bent bill", "synonyms": ["flamingo"], "image_count": 18, "id": 453, "frequency": "c", "synset": "flamingo.n.01"}, {"name": "flannel", "instance_count": 18, "def": "a soft light woolen fabric; used for clothing", "synonyms": ["flannel"], "image_count": 14, "id": 454, "frequency": "c", "synset": "flannel.n.01"}, {"name": "flap", "instance_count": 218, "def": "any broad thin covering attached at one edge, such as a mud flap next to a wheel or a flap on an airplane wing", "synonyms": ["flap"], "image_count": 77, "id": 455, "frequency": "c", "synset": "flap.n.01"}, {"name": "flash", "instance_count": 10, "def": "a lamp for providing momentary light to take a photograph", "synonyms": ["flash", "flashbulb"], "image_count": 8, "id": 456, "frequency": "r", "synset": "flash.n.10"}, {"name": "flashlight", "instance_count": 48, "def": "a small portable battery-powered electric lamp", "synonyms": ["flashlight", "torch"], "image_count": 37, "id": 457, "frequency": "c", "synset": "flashlight.n.01"}, {"name": "fleece", "instance_count": 2, "def": "a soft bulky fabric with deep pile; used chiefly for clothing", "synonyms": ["fleece"], "image_count": 1, "id": 458, "frequency": "r", "synset": "fleece.n.03"}, {"name": "flip-flop_(sandal)", "instance_count": 1103, "def": "a backless sandal held to the foot by a thong between two toes", "synonyms": ["flip-flop_(sandal)"], "image_count": 346, "id": 459, "frequency": "f", "synset": "flip-flop.n.02"}, {"name": "flipper_(footwear)", "instance_count": 49, "def": "a shoe to aid a person in swimming", "synonyms": ["flipper_(footwear)", "fin_(footwear)"], "image_count": 19, "id": 460, "frequency": "c", "synset": "flipper.n.01"}, {"name": "flower_arrangement", "instance_count": 3960, "def": "a decorative arrangement of flowers", "synonyms": ["flower_arrangement", "floral_arrangement"], "image_count": 1779, "id": 461, "frequency": "f", "synset": "flower_arrangement.n.01"}, {"name": "flute_glass", "instance_count": 86, "def": "a tall narrow wineglass", "synonyms": ["flute_glass", "champagne_flute"], "image_count": 23, "id": 462, "frequency": "c", "synset": "flute.n.02"}, {"name": "foal", "instance_count": 30, "def": "a young horse", "synonyms": ["foal"], "image_count": 25, "id": 463, "frequency": "c", "synset": "foal.n.01"}, {"name": "folding_chair", "instance_count": 303, "def": "a chair that can be folded flat for storage", "synonyms": ["folding_chair"], "image_count": 67, "id": 464, "frequency": "c", "synset": "folding_chair.n.01"}, {"name": "food_processor", "instance_count": 22, "def": "a kitchen appliance for shredding, blending, chopping, or slicing food", "synonyms": ["food_processor"], "image_count": 19, "id": 465, "frequency": "c", "synset": "food_processor.n.01"}, {"name": "football_(American)", "instance_count": 35, "def": "the inflated oblong ball used in playing American football", "synonyms": ["football_(American)"], "image_count": 28, "id": 466, "frequency": "c", "synset": "football.n.02"}, {"name": "football_helmet", "instance_count": 7, "def": "a padded helmet with a face mask to protect the head of football players", "synonyms": ["football_helmet"], "image_count": 4, "id": 467, "frequency": "r", "synset": "football_helmet.n.01"}, {"name": "footstool", "instance_count": 41, "def": "a low seat or a stool to rest the feet of a seated person", "synonyms": ["footstool", "footrest"], "image_count": 27, "id": 468, "frequency": "c", "synset": "footstool.n.01"}, {"name": "fork", "instance_count": 3137, "def": "cutlery used for serving and eating food", "synonyms": ["fork"], "image_count": 1861, "id": 469, "frequency": "f", "synset": "fork.n.01"}, {"name": "forklift", "instance_count": 14, "def": "an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them", "synonyms": ["forklift"], "image_count": 11, "id": 470, "frequency": "c", "synset": "forklift.n.01"}, {"name": "freight_car", "instance_count": 121, "def": "a railway car that carries freight", "synonyms": ["freight_car"], "image_count": 13, "id": 471, "frequency": "c", "synset": "freight_car.n.01"}, {"name": "French_toast", "instance_count": 41, "def": "bread slice dipped in egg and milk and fried", "synonyms": ["French_toast"], "image_count": 13, "id": 472, "frequency": "c", "synset": "french_toast.n.01"}, {"name": "freshener", "instance_count": 39, "def": "anything that freshens air by removing or covering odor", "synonyms": ["freshener", "air_freshener"], "image_count": 32, "id": 473, "frequency": "c", "synset": "freshener.n.01"}, {"name": "frisbee", "instance_count": 2332, "def": "a light, plastic disk propelled with a flip of the wrist for recreation or competition", "synonyms": ["frisbee"], "image_count": 1767, "id": 474, "frequency": "f", "synset": "frisbee.n.01"}, {"name": "frog", "instance_count": 84, "def": "a tailless stout-bodied amphibians with long hind limbs for leaping", "synonyms": ["frog", "toad", "toad_frog"], "image_count": 42, "id": 475, "frequency": "c", "synset": "frog.n.01"}, {"name": "fruit_juice", "instance_count": 37, "def": "drink produced by squeezing or crushing fruit", "synonyms": ["fruit_juice"], "image_count": 17, "id": 476, "frequency": "c", "synset": "fruit_juice.n.01"}, {"name": "frying_pan", "instance_count": 310, "def": "a pan used for frying foods", "synonyms": ["frying_pan", "frypan", "skillet"], "image_count": 128, "id": 477, "frequency": "f", "synset": "frying_pan.n.01"}, {"name": "fudge", "instance_count": 4, "def": "soft creamy candy", "synonyms": ["fudge"], "image_count": 1, "id": 478, "frequency": "r", "synset": "fudge.n.01"}, {"name": "funnel", "instance_count": 9, "def": "a cone-shaped utensil used to channel a substance into a container with a small mouth", "synonyms": ["funnel"], "image_count": 9, "id": 479, "frequency": "r", "synset": "funnel.n.02"}, {"name": "futon", "instance_count": 11, "def": "a pad that is used for sleeping on the floor or on a raised frame", "synonyms": ["futon"], "image_count": 10, "id": 480, "frequency": "r", "synset": "futon.n.01"}, {"name": "gag", "instance_count": 4, "def": "restraint put into a person's mouth to prevent speaking or shouting", "synonyms": ["gag", "muzzle"], "image_count": 4, "id": 481, "frequency": "r", "synset": "gag.n.02"}, {"name": "garbage", "instance_count": 18, "def": "a receptacle where waste can be discarded", "synonyms": ["garbage"], "image_count": 9, "id": 482, "frequency": "r", "synset": "garbage.n.03"}, {"name": "garbage_truck", "instance_count": 18, "def": "a truck for collecting domestic refuse", "synonyms": ["garbage_truck"], "image_count": 18, "id": 483, "frequency": "c", "synset": "garbage_truck.n.01"}, {"name": "garden_hose", "instance_count": 50, "def": "a hose used for watering a lawn or garden", "synonyms": ["garden_hose"], "image_count": 41, "id": 484, "frequency": "c", "synset": "garden_hose.n.01"}, {"name": "gargle", "instance_count": 38, "def": "a medicated solution used for gargling and rinsing the mouth", "synonyms": ["gargle", "mouthwash"], "image_count": 28, "id": 485, "frequency": "c", "synset": "gargle.n.01"}, {"name": "gargoyle", "instance_count": 8, "def": "an ornament consisting of a grotesquely carved figure of a person or animal", "synonyms": ["gargoyle"], "image_count": 3, "id": 486, "frequency": "r", "synset": "gargoyle.n.02"}, {"name": "garlic", "instance_count": 487, "def": "aromatic bulb used as seasoning", "synonyms": ["garlic", "ail"], "image_count": 65, "id": 487, "frequency": "c", "synset": "garlic.n.02"}, {"name": "gasmask", "instance_count": 12, "def": "a protective face mask with a filter", "synonyms": ["gasmask", "respirator", "gas_helmet"], "image_count": 9, "id": 488, "frequency": "r", "synset": "gasmask.n.01"}, {"name": "gazelle", "instance_count": 82, "def": "small swift graceful antelope of Africa and Asia having lustrous eyes", "synonyms": ["gazelle"], "image_count": 23, "id": 489, "frequency": "c", "synset": "gazelle.n.01"}, {"name": "gelatin", "instance_count": 248, "def": "an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods", "synonyms": ["gelatin", "jelly"], "image_count": 24, "id": 490, "frequency": "c", "synset": "gelatin.n.02"}, {"name": "gemstone", "instance_count": 2, "def": "a crystalline rock that can be cut and polished for jewelry", "synonyms": ["gemstone"], "image_count": 1, "id": 491, "frequency": "r", "synset": "gem.n.02"}, {"name": "generator", "instance_count": 2, "def": "engine that converts mechanical energy into electrical energy by electromagnetic induction", "synonyms": ["generator"], "image_count": 2, "id": 492, "frequency": "r", "synset": "generator.n.02"}, {"name": "giant_panda", "instance_count": 112, "def": "large black-and-white herbivorous mammal of bamboo forests of China and Tibet", "synonyms": ["giant_panda", "panda", "panda_bear"], "image_count": 59, "id": 493, "frequency": "c", "synset": "giant_panda.n.01"}, {"name": "gift_wrap", "instance_count": 247, "def": "attractive wrapping paper suitable for wrapping gifts", "synonyms": ["gift_wrap"], "image_count": 48, "id": 494, "frequency": "c", "synset": "gift_wrap.n.01"}, {"name": "ginger", "instance_count": 93, "def": "the root of the common ginger plant; used fresh as a seasoning", "synonyms": ["ginger", "gingerroot"], "image_count": 17, "id": 495, "frequency": "c", "synset": "ginger.n.03"}, {"name": "giraffe", "instance_count": 3923, "def": "tall animal having a spotted coat and small horns and very long neck and legs", "synonyms": ["giraffe"], "image_count": 1877, "id": 496, "frequency": "f", "synset": "giraffe.n.01"}, {"name": "cincture", "instance_count": 56, "def": "a band of material around the waist that strengthens a skirt or trousers", "synonyms": ["cincture", "sash", "waistband", "waistcloth"], "image_count": 18, "id": 497, "frequency": "c", "synset": "girdle.n.02"}, {"name": "glass_(drink_container)", "instance_count": 6420, "def": "a container for holding liquids while drinking", "synonyms": ["glass_(drink_container)", "drinking_glass"], "image_count": 1920, "id": 498, "frequency": "f", "synset": "glass.n.02"}, {"name": "globe", "instance_count": 59, "def": "a sphere on which a map (especially of the earth) is represented", "synonyms": ["globe"], "image_count": 50, "id": 499, "frequency": "c", "synset": "globe.n.03"}, {"name": "glove", "instance_count": 5951, "def": "handwear covering the hand", "synonyms": ["glove"], "image_count": 1890, "id": 500, "frequency": "f", "synset": "glove.n.02"}, {"name": "goat", "instance_count": 842, "def": "a common goat", "synonyms": ["goat"], "image_count": 99, "id": 501, "frequency": "c", "synset": "goat.n.01"}, {"name": "goggles", "instance_count": 3202, "def": "tight-fitting spectacles worn to protect the eyes", "synonyms": ["goggles"], "image_count": 1530, "id": 502, "frequency": "f", "synset": "goggles.n.01"}, {"name": "goldfish", "instance_count": 11, "def": "small golden or orange-red freshwater fishes used as pond or aquarium pets", "synonyms": ["goldfish"], "image_count": 3, "id": 503, "frequency": "r", "synset": "goldfish.n.01"}, {"name": "golf_club", "instance_count": 14, "def": "golf equipment used by a golfer to hit a golf ball", "synonyms": ["golf_club", "golf-club"], "image_count": 11, "id": 504, "frequency": "c", "synset": "golf_club.n.02"}, {"name": "golfcart", "instance_count": 25, "def": "a small motor vehicle in which golfers can ride between shots", "synonyms": ["golfcart"], "image_count": 19, "id": 505, "frequency": "c", "synset": "golfcart.n.01"}, {"name": "gondola_(boat)", "instance_count": 8, "def": "long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice", "synonyms": ["gondola_(boat)"], "image_count": 3, "id": 506, "frequency": "r", "synset": "gondola.n.02"}, {"name": "goose", "instance_count": 413, "def": "loud, web-footed long-necked aquatic birds usually larger than ducks", "synonyms": ["goose"], "image_count": 63, "id": 507, "frequency": "c", "synset": "goose.n.01"}, {"name": "gorilla", "instance_count": 10, "def": "largest ape", "synonyms": ["gorilla"], "image_count": 5, "id": 508, "frequency": "r", "synset": "gorilla.n.01"}, {"name": "gourd", "instance_count": 101, "def": "any of numerous inedible fruits with hard rinds", "synonyms": ["gourd"], "image_count": 6, "id": 509, "frequency": "r", "synset": "gourd.n.02"}, {"name": "grape", "instance_count": 6377, "def": "any of various juicy fruit with green or purple skins; grow in clusters", "synonyms": ["grape"], "image_count": 233, "id": 510, "frequency": "f", "synset": "grape.n.01"}, {"name": "grater", "instance_count": 64, "def": "utensil with sharp perforations for shredding foods (as vegetables or cheese)", "synonyms": ["grater"], "image_count": 54, "id": 511, "frequency": "c", "synset": "grater.n.01"}, {"name": "gravestone", "instance_count": 778, "def": "a stone that is used to mark a grave", "synonyms": ["gravestone", "headstone", "tombstone"], "image_count": 36, "id": 512, "frequency": "c", "synset": "gravestone.n.01"}, {"name": "gravy_boat", "instance_count": 10, "def": "a dish (often boat-shaped) for serving gravy or sauce", "synonyms": ["gravy_boat", "gravy_holder"], "image_count": 10, "id": 513, "frequency": "r", "synset": "gravy_boat.n.01"}, {"name": "green_bean", "instance_count": 2571, "def": "a common bean plant cultivated for its slender green edible pods", "synonyms": ["green_bean"], "image_count": 124, "id": 514, "frequency": "f", "synset": "green_bean.n.02"}, {"name": "green_onion", "instance_count": 1618, "def": "a young onion before the bulb has enlarged", "synonyms": ["green_onion", "spring_onion", "scallion"], "image_count": 101, "id": 515, "frequency": "f", "synset": "green_onion.n.01"}, {"name": "griddle", "instance_count": 4, "def": "cooking utensil consisting of a flat heated surface on which food is cooked", "synonyms": ["griddle"], "image_count": 3, "id": 516, "frequency": "r", "synset": "griddle.n.01"}, {"name": "grill", "instance_count": 747, "def": "a framework of metal bars used as a partition or a grate", "synonyms": ["grill", "grille", "grillwork", "radiator_grille"], "image_count": 363, "id": 517, "frequency": "f", "synset": "grill.n.02"}, {"name": "grits", "instance_count": 3, "def": "coarsely ground corn boiled as a breakfast dish", "synonyms": ["grits", "hominy_grits"], "image_count": 3, "id": 518, "frequency": "r", "synset": "grits.n.01"}, {"name": "grizzly", "instance_count": 44, "def": "powerful brownish-yellow bear of the uplands of western North America", "synonyms": ["grizzly", "grizzly_bear"], "image_count": 30, "id": 519, "frequency": "c", "synset": "grizzly.n.01"}, {"name": "grocery_bag", "instance_count": 46, "def": "a sack for holding customer's groceries", "synonyms": ["grocery_bag"], "image_count": 18, "id": 520, "frequency": "c", "synset": "grocery_bag.n.01"}, {"name": "guitar", "instance_count": 315, "def": "a stringed instrument usually having six strings; played by strumming or plucking", "synonyms": ["guitar"], "image_count": 199, "id": 521, "frequency": "f", "synset": "guitar.n.01"}, {"name": "gull", "instance_count": 1398, "def": "mostly white aquatic bird having long pointed wings and short legs", "synonyms": ["gull", "seagull"], "image_count": 97, "id": 522, "frequency": "c", "synset": "gull.n.02"}, {"name": "gun", "instance_count": 68, "def": "a weapon that discharges a bullet at high velocity from a metal tube", "synonyms": ["gun"], "image_count": 32, "id": 523, "frequency": "c", "synset": "gun.n.01"}, {"name": "hairbrush", "instance_count": 165, "def": "a brush used to groom a person's hair", "synonyms": ["hairbrush"], "image_count": 121, "id": 524, "frequency": "f", "synset": "hairbrush.n.01"}, {"name": "hairnet", "instance_count": 53, "def": "a small net that someone wears over their hair to keep it in place", "synonyms": ["hairnet"], "image_count": 16, "id": 525, "frequency": "c", "synset": "hairnet.n.01"}, {"name": "hairpin", "instance_count": 20, "def": "a double pronged pin used to hold women's hair in place", "synonyms": ["hairpin"], "image_count": 12, "id": 526, "frequency": "c", "synset": "hairpin.n.01"}, {"name": "halter_top", "instance_count": 3, "def": "a woman's top that fastens behind the back and neck leaving the back and arms uncovered", "synonyms": ["halter_top"], "image_count": 2, "id": 527, "frequency": "r", "synset": "halter.n.03"}, {"name": "ham", "instance_count": 1765, "def": "meat cut from the thigh of a hog (usually smoked)", "synonyms": ["ham", "jambon", "gammon"], "image_count": 214, "id": 528, "frequency": "f", "synset": "ham.n.01"}, {"name": "hamburger", "instance_count": 126, "def": "a sandwich consisting of a patty of minced beef served on a bun", "synonyms": ["hamburger", "beefburger", "burger"], "image_count": 48, "id": 529, "frequency": "c", "synset": "hamburger.n.01"}, {"name": "hammer", "instance_count": 41, "def": "a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking", "synonyms": ["hammer"], "image_count": 26, "id": 530, "frequency": "c", "synset": "hammer.n.02"}, {"name": "hammock", "instance_count": 15, "def": "a hanging bed of canvas or rope netting (usually suspended between two trees)", "synonyms": ["hammock"], "image_count": 13, "id": 531, "frequency": "c", "synset": "hammock.n.02"}, {"name": "hamper", "instance_count": 5, "def": "a basket usually with a cover", "synonyms": ["hamper"], "image_count": 4, "id": 532, "frequency": "r", "synset": "hamper.n.02"}, {"name": "hamster", "instance_count": 12, "def": "short-tailed burrowing rodent with large cheek pouches", "synonyms": ["hamster"], "image_count": 11, "id": 533, "frequency": "c", "synset": "hamster.n.01"}, {"name": "hair_dryer", "instance_count": 144, "def": "a hand-held electric blower that can blow warm air onto the hair", "synonyms": ["hair_dryer"], "image_count": 123, "id": 534, "frequency": "f", "synset": "hand_blower.n.01"}, {"name": "hand_glass", "instance_count": 7, "def": "a mirror intended to be held in the hand", "synonyms": ["hand_glass", "hand_mirror"], "image_count": 7, "id": 535, "frequency": "r", "synset": "hand_glass.n.01"}, {"name": "hand_towel", "instance_count": 619, "def": "a small towel used to dry the hands or face", "synonyms": ["hand_towel", "face_towel"], "image_count": 200, "id": 536, "frequency": "f", "synset": "hand_towel.n.01"}, {"name": "handcart", "instance_count": 204, "def": "wheeled vehicle that can be pushed by a person", "synonyms": ["handcart", "pushcart", "hand_truck"], "image_count": 91, "id": 537, "frequency": "c", "synset": "handcart.n.01"}, {"name": "handcuff", "instance_count": 10, "def": "shackle that consists of a metal loop that can be locked around the wrist", "synonyms": ["handcuff"], "image_count": 9, "id": 538, "frequency": "r", "synset": "handcuff.n.01"}, {"name": "handkerchief", "instance_count": 86, "def": "a square piece of cloth used for wiping the eyes or nose or as a costume accessory", "synonyms": ["handkerchief"], "image_count": 72, "id": 539, "frequency": "c", "synset": "handkerchief.n.01"}, {"name": "handle", "instance_count": 8314, "def": "the appendage to an object that is designed to be held in order to use or move it", "synonyms": ["handle", "grip", "handgrip"], "image_count": 1886, "id": 540, "frequency": "f", "synset": "handle.n.01"}, {"name": "handsaw", "instance_count": 5, "def": "a saw used with one hand for cutting wood", "synonyms": ["handsaw", "carpenter's_saw"], "image_count": 4, "id": 541, "frequency": "r", "synset": "handsaw.n.01"}, {"name": "hardback_book", "instance_count": 2, "def": "a book with cardboard or cloth or leather covers", "synonyms": ["hardback_book", "hardcover_book"], "image_count": 1, "id": 542, "frequency": "r", "synset": "hardback.n.01"}, {"name": "harmonium", "instance_count": 2, "def": "a free-reed instrument in which air is forced through the reeds by bellows", "synonyms": ["harmonium", "organ_(musical_instrument)", "reed_organ_(musical_instrument)"], "image_count": 1, "id": 543, "frequency": "r", "synset": "harmonium.n.01"}, {"name": "hat", "instance_count": 7213, "def": "headwear that protects the head from bad weather, sun, or worn for fashion", "synonyms": ["hat"], "image_count": 1932, "id": 544, "frequency": "f", "synset": "hat.n.01"}, {"name": "hatbox", "instance_count": 7, "def": "a round piece of luggage for carrying hats", "synonyms": ["hatbox"], "image_count": 4, "id": 545, "frequency": "r", "synset": "hatbox.n.01"}, {"name": "veil", "instance_count": 57, "def": "a garment that covers the head OR face", "synonyms": ["veil"], "image_count": 56, "id": 546, "frequency": "c", "synset": "head_covering.n.01"}, {"name": "headband", "instance_count": 1114, "def": "a band worn around or over the head", "synonyms": ["headband"], "image_count": 854, "id": 547, "frequency": "f", "synset": "headband.n.01"}, {"name": "headboard", "instance_count": 850, "def": "a vertical board or panel forming the head of a bedstead", "synonyms": ["headboard"], "image_count": 755, "id": 548, "frequency": "f", "synset": "headboard.n.01"}, {"name": "headlight", "instance_count": 7326, "def": "a powerful light with reflector; attached to the front of an automobile or locomotive", "synonyms": ["headlight", "headlamp"], "image_count": 1843, "id": 549, "frequency": "f", "synset": "headlight.n.01"}, {"name": "headscarf", "instance_count": 235, "def": "a kerchief worn over the head and tied under the chin", "synonyms": ["headscarf"], "image_count": 96, "id": 550, "frequency": "c", "synset": "headscarf.n.01"}, {"name": "headset", "instance_count": 10, "def": "receiver consisting of a pair of headphones", "synonyms": ["headset"], "image_count": 7, "id": 551, "frequency": "r", "synset": "headset.n.01"}, {"name": "headstall_(for_horses)", "instance_count": 133, "def": "the band that is the part of a bridle that fits around a horse's head", "synonyms": ["headstall_(for_horses)", "headpiece_(for_horses)"], "image_count": 74, "id": 552, "frequency": "c", "synset": "headstall.n.01"}, {"name": "heart", "instance_count": 347, "def": "a muscular organ; its contractions move the blood through the body", "synonyms": ["heart"], "image_count": 66, "id": 553, "frequency": "c", "synset": "heart.n.02"}, {"name": "heater", "instance_count": 64, "def": "device that heats water or supplies warmth to a room", "synonyms": ["heater", "warmer"], "image_count": 57, "id": 554, "frequency": "c", "synset": "heater.n.01"}, {"name": "helicopter", "instance_count": 68, "def": "an aircraft without wings that obtains its lift from the rotation of overhead blades", "synonyms": ["helicopter"], "image_count": 44, "id": 555, "frequency": "c", "synset": "helicopter.n.01"}, {"name": "helmet", "instance_count": 4845, "def": "a protective headgear made of hard material to resist blows", "synonyms": ["helmet"], "image_count": 1905, "id": 556, "frequency": "f", "synset": "helmet.n.02"}, {"name": "heron", "instance_count": 6, "def": "grey or white wading bird with long neck and long legs and (usually) long bill", "synonyms": ["heron"], "image_count": 4, "id": 557, "frequency": "r", "synset": "heron.n.02"}, {"name": "highchair", "instance_count": 98, "def": "a chair for feeding a very young child", "synonyms": ["highchair", "feeding_chair"], "image_count": 90, "id": 558, "frequency": "c", "synset": "highchair.n.01"}, {"name": "hinge", "instance_count": 5283, "def": "a joint that holds two parts together so that one can swing relative to the other", "synonyms": ["hinge"], "image_count": 1635, "id": 559, "frequency": "f", "synset": "hinge.n.01"}, {"name": "hippopotamus", "instance_count": 24, "def": "massive thick-skinned animal living in or around rivers of tropical Africa", "synonyms": ["hippopotamus"], "image_count": 8, "id": 560, "frequency": "r", "synset": "hippopotamus.n.01"}, {"name": "hockey_stick", "instance_count": 15, "def": "sports implement consisting of a stick used by hockey players to move the puck", "synonyms": ["hockey_stick"], "image_count": 5, "id": 561, "frequency": "r", "synset": "hockey_stick.n.01"}, {"name": "hog", "instance_count": 73, "def": "domestic swine", "synonyms": ["hog", "pig"], "image_count": 50, "id": 562, "frequency": "c", "synset": "hog.n.03"}, {"name": "home_plate_(baseball)", "instance_count": 551, "def": "(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score", "synonyms": ["home_plate_(baseball)", "home_base_(baseball)"], "image_count": 545, "id": 563, "frequency": "f", "synset": "home_plate.n.01"}, {"name": "honey", "instance_count": 90, "def": "a sweet yellow liquid produced by bees", "synonyms": ["honey"], "image_count": 20, "id": 564, "frequency": "c", "synset": "honey.n.01"}, {"name": "fume_hood", "instance_count": 208, "def": "metal covering leading to a vent that exhausts smoke or fumes", "synonyms": ["fume_hood", "exhaust_hood"], "image_count": 193, "id": 565, "frequency": "f", "synset": "hood.n.06"}, {"name": "hook", "instance_count": 1157, "def": "a curved or bent implement for suspending or pulling something", "synonyms": ["hook"], "image_count": 285, "id": 566, "frequency": "f", "synset": "hook.n.05"}, {"name": "hookah", "instance_count": 3, "def": "a tobacco pipe with a long flexible tube connected to a container where the smoke is cooled by passing through water", "synonyms": ["hookah", "narghile", "nargileh", "sheesha", "shisha", "water_pipe"], "image_count": 3, "id": 567, "frequency": "r", "synset": "hookah.n.01"}, {"name": "hornet", "instance_count": 1, "def": "large stinging wasp", "synonyms": ["hornet"], "image_count": 1, "id": 568, "frequency": "r", "synset": "hornet.n.01"}, {"name": "horse", "instance_count": 4744, "def": "a common horse", "synonyms": ["horse"], "image_count": 1904, "id": 569, "frequency": "f", "synset": "horse.n.01"}, {"name": "hose", "instance_count": 610, "def": "a flexible pipe for conveying a liquid or gas", "synonyms": ["hose", "hosepipe"], "image_count": 294, "id": 570, "frequency": "f", "synset": "hose.n.03"}, {"name": "hot-air_balloon", "instance_count": 4, "def": "balloon for travel through the air in a basket suspended below a large bag of heated air", "synonyms": ["hot-air_balloon"], "image_count": 3, "id": 571, "frequency": "r", "synset": "hot-air_balloon.n.01"}, {"name": "hotplate", "instance_count": 6, "def": "a portable electric appliance for heating or cooking or keeping food warm", "synonyms": ["hotplate"], "image_count": 5, "id": 572, "frequency": "r", "synset": "hot_plate.n.01"}, {"name": "hot_sauce", "instance_count": 70, "def": "a pungent peppery sauce", "synonyms": ["hot_sauce"], "image_count": 24, "id": 573, "frequency": "c", "synset": "hot_sauce.n.01"}, {"name": "hourglass", "instance_count": 2, "def": "a sandglass timer that runs for sixty minutes", "synonyms": ["hourglass"], "image_count": 2, "id": 574, "frequency": "r", "synset": "hourglass.n.01"}, {"name": "houseboat", "instance_count": 4, "def": "a barge that is designed and equipped for use as a dwelling", "synonyms": ["houseboat"], "image_count": 2, "id": 575, "frequency": "r", "synset": "houseboat.n.01"}, {"name": "hummingbird", "instance_count": 18, "def": "tiny American bird having brilliant iridescent plumage and long slender bills", "synonyms": ["hummingbird"], "image_count": 16, "id": 576, "frequency": "c", "synset": "hummingbird.n.01"}, {"name": "hummus", "instance_count": 9, "def": "a thick spread made from mashed chickpeas", "synonyms": ["hummus", "humus", "hommos", "hoummos", "humous"], "image_count": 8, "id": 577, "frequency": "r", "synset": "hummus.n.01"}, {"name": "polar_bear", "instance_count": 196, "def": "white bear of Arctic regions", "synonyms": ["polar_bear"], "image_count": 154, "id": 578, "frequency": "f", "synset": "ice_bear.n.01"}, {"name": "icecream", "instance_count": 180, "def": "frozen dessert containing cream and sugar and flavoring", "synonyms": ["icecream"], "image_count": 66, "id": 579, "frequency": "c", "synset": "ice_cream.n.01"}, {"name": "popsicle", "instance_count": 1, "def": "ice cream or water ice on a small wooden stick", "synonyms": ["popsicle"], "image_count": 1, "id": 580, "frequency": "r", "synset": "ice_lolly.n.01"}, {"name": "ice_maker", "instance_count": 26, "def": "an appliance included in some electric refrigerators for making ice cubes", "synonyms": ["ice_maker"], "image_count": 24, "id": 581, "frequency": "c", "synset": "ice_maker.n.01"}, {"name": "ice_pack", "instance_count": 4, "def": "a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling", "synonyms": ["ice_pack", "ice_bag"], "image_count": 1, "id": 582, "frequency": "r", "synset": "ice_pack.n.01"}, {"name": "ice_skate", "instance_count": 14, "def": "skate consisting of a boot with a steel blade fitted to the sole", "synonyms": ["ice_skate"], "image_count": 4, "id": 583, "frequency": "r", "synset": "ice_skate.n.01"}, {"name": "igniter", "instance_count": 77, "def": "a substance or device used to start a fire", "synonyms": ["igniter", "ignitor", "lighter"], "image_count": 75, "id": 584, "frequency": "c", "synset": "igniter.n.01"}, {"name": "inhaler", "instance_count": 7, "def": "a dispenser that produces a chemical vapor to be inhaled through mouth or nose", "synonyms": ["inhaler", "inhalator"], "image_count": 6, "id": 585, "frequency": "r", "synset": "inhaler.n.01"}, {"name": "iPod", "instance_count": 172, "def": "a pocket-sized device used to play music files", "synonyms": ["iPod"], "image_count": 126, "id": 586, "frequency": "f", "synset": "ipod.n.01"}, {"name": "iron_(for_clothing)", "instance_count": 38, "def": "home appliance consisting of a flat metal base that is heated and used to smooth cloth", "synonyms": ["iron_(for_clothing)", "smoothing_iron_(for_clothing)"], "image_count": 24, "id": 587, "frequency": "c", "synset": "iron.n.04"}, {"name": "ironing_board", "instance_count": 24, "def": "narrow padded board on collapsible supports; used for ironing clothes", "synonyms": ["ironing_board"], "image_count": 22, "id": 588, "frequency": "c", "synset": "ironing_board.n.01"}, {"name": "jacket", "instance_count": 8013, "def": "a waist-length coat", "synonyms": ["jacket"], "image_count": 1872, "id": 589, "frequency": "f", "synset": "jacket.n.01"}, {"name": "jam", "instance_count": 29, "def": "preserve of crushed fruit", "synonyms": ["jam"], "image_count": 16, "id": 590, "frequency": "c", "synset": "jam.n.01"}, {"name": "jar", "instance_count": 2002, "def": "a vessel (usually cylindrical) with a wide mouth and without handles", "synonyms": ["jar"], "image_count": 423, "id": 591, "frequency": "f", "synset": "jar.n.01"}, {"name": "jean", "instance_count": 5421, "def": "(usually plural) close-fitting trousers of heavy denim for manual work or casual wear", "synonyms": ["jean", "blue_jean", "denim"], "image_count": 1927, "id": 592, "frequency": "f", "synset": "jean.n.01"}, {"name": "jeep", "instance_count": 55, "def": "a car suitable for traveling over rough terrain", "synonyms": ["jeep", "landrover"], "image_count": 38, "id": 593, "frequency": "c", "synset": "jeep.n.01"}, {"name": "jelly_bean", "instance_count": 116, "def": "sugar-glazed jellied candy", "synonyms": ["jelly_bean", "jelly_egg"], "image_count": 3, "id": 594, "frequency": "r", "synset": "jelly_bean.n.01"}, {"name": "jersey", "instance_count": 8117, "def": "a close-fitting pullover shirt", "synonyms": ["jersey", "T-shirt", "tee_shirt"], "image_count": 1945, "id": 595, "frequency": "f", "synset": "jersey.n.03"}, {"name": "jet_plane", "instance_count": 87, "def": "an airplane powered by one or more jet engines", "synonyms": ["jet_plane", "jet-propelled_plane"], "image_count": 35, "id": 596, "frequency": "c", "synset": "jet.n.01"}, {"name": "jewel", "instance_count": 1, "def": "a precious or semiprecious stone incorporated into a piece of jewelry", "synonyms": ["jewel", "gem", "precious_stone"], "image_count": 1, "id": 597, "frequency": "r", "synset": "jewel.n.01"}, {"name": "jewelry", "instance_count": 51, "def": "an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)", "synonyms": ["jewelry", "jewellery"], "image_count": 13, "id": 598, "frequency": "c", "synset": "jewelry.n.01"}, {"name": "joystick", "instance_count": 12, "def": "a control device for computers consisting of a vertical handle that can move freely in two directions", "synonyms": ["joystick"], "image_count": 9, "id": 599, "frequency": "r", "synset": "joystick.n.02"}, {"name": "jumpsuit", "instance_count": 21, "def": "one-piece garment fashioned after a parachutist's uniform", "synonyms": ["jumpsuit"], "image_count": 14, "id": 600, "frequency": "c", "synset": "jump_suit.n.01"}, {"name": "kayak", "instance_count": 124, "def": "a small canoe consisting of a light frame made watertight with animal skins", "synonyms": ["kayak"], "image_count": 37, "id": 601, "frequency": "c", "synset": "kayak.n.01"}, {"name": "keg", "instance_count": 6, "def": "small cask or barrel", "synonyms": ["keg"], "image_count": 3, "id": 602, "frequency": "r", "synset": "keg.n.02"}, {"name": "kennel", "instance_count": 4, "def": "outbuilding that serves as a shelter for a dog", "synonyms": ["kennel", "doghouse"], "image_count": 4, "id": 603, "frequency": "r", "synset": "kennel.n.01"}, {"name": "kettle", "instance_count": 130, "def": "a metal pot for stewing or boiling; usually has a lid", "synonyms": ["kettle", "boiler"], "image_count": 100, "id": 604, "frequency": "c", "synset": "kettle.n.01"}, {"name": "key", "instance_count": 447, "def": "metal instrument used to unlock a lock", "synonyms": ["key"], "image_count": 195, "id": 605, "frequency": "f", "synset": "key.n.01"}, {"name": "keycard", "instance_count": 1, "def": "a plastic card used to gain access typically to a door", "synonyms": ["keycard"], "image_count": 1, "id": 606, "frequency": "r", "synset": "keycard.n.01"}, {"name": "kilt", "instance_count": 19, "def": "a knee-length pleated tartan skirt worn by men as part of the traditional dress in the Highlands of northern Scotland", "synonyms": ["kilt"], "image_count": 12, "id": 607, "frequency": "c", "synset": "kilt.n.01"}, {"name": "kimono", "instance_count": 38, "def": "a loose robe; imitated from robes originally worn by Japanese", "synonyms": ["kimono"], "image_count": 24, "id": 608, "frequency": "c", "synset": "kimono.n.01"}, {"name": "kitchen_sink", "instance_count": 519, "def": "a sink in a kitchen", "synonyms": ["kitchen_sink"], "image_count": 489, "id": 609, "frequency": "f", "synset": "kitchen_sink.n.01"}, {"name": "kitchen_table", "instance_count": 11, "def": "a table in the kitchen", "synonyms": ["kitchen_table"], "image_count": 10, "id": 610, "frequency": "r", "synset": "kitchen_table.n.01"}, {"name": "kite", "instance_count": 11174, "def": "plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string", "synonyms": ["kite"], "image_count": 1689, "id": 611, "frequency": "f", "synset": "kite.n.03"}, {"name": "kitten", "instance_count": 60, "def": "young domestic cat", "synonyms": ["kitten", "kitty"], "image_count": 42, "id": 612, "frequency": "c", "synset": "kitten.n.01"}, {"name": "kiwi_fruit", "instance_count": 702, "def": "fuzzy brown egg-shaped fruit with slightly tart green flesh", "synonyms": ["kiwi_fruit"], "image_count": 81, "id": 613, "frequency": "c", "synset": "kiwi.n.03"}, {"name": "knee_pad", "instance_count": 1765, "def": "protective garment consisting of a pad worn by football or baseball or hockey players", "synonyms": ["knee_pad"], "image_count": 894, "id": 614, "frequency": "f", "synset": "knee_pad.n.01"}, {"name": "knife", "instance_count": 3515, "def": "tool with a blade and point used as a cutting instrument", "synonyms": ["knife"], "image_count": 1868, "id": 615, "frequency": "f", "synset": "knife.n.01"}, {"name": "knitting_needle", "instance_count": 16, "def": "needle consisting of a slender rod with pointed ends; usually used in pairs", "synonyms": ["knitting_needle"], "image_count": 7, "id": 616, "frequency": "r", "synset": "knitting_needle.n.01"}, {"name": "knob", "instance_count": 8432, "def": "a round handle often found on a door", "synonyms": ["knob"], "image_count": 1567, "id": 617, "frequency": "f", "synset": "knob.n.02"}, {"name": "knocker_(on_a_door)", "instance_count": 10, "def": "a device (usually metal and ornamental) attached by a hinge to a door", "synonyms": ["knocker_(on_a_door)", "doorknocker"], "image_count": 10, "id": 618, "frequency": "r", "synset": "knocker.n.05"}, {"name": "koala", "instance_count": 15, "def": "sluggish tailless Australian marsupial with grey furry ears and coat", "synonyms": ["koala", "koala_bear"], "image_count": 8, "id": 619, "frequency": "r", "synset": "koala.n.01"}, {"name": "lab_coat", "instance_count": 42, "def": "a light coat worn to protect clothing from substances used while working in a laboratory", "synonyms": ["lab_coat", "laboratory_coat"], "image_count": 7, "id": 620, "frequency": "r", "synset": "lab_coat.n.01"}, {"name": "ladder", "instance_count": 975, "def": "steps consisting of two parallel members connected by rungs", "synonyms": ["ladder"], "image_count": 629, "id": 621, "frequency": "f", "synset": "ladder.n.01"}, {"name": "ladle", "instance_count": 226, "def": "a spoon-shaped vessel with a long handle frequently used to transfer liquids", "synonyms": ["ladle"], "image_count": 89, "id": 622, "frequency": "c", "synset": "ladle.n.01"}, {"name": "ladybug", "instance_count": 68, "def": "small round bright-colored and spotted beetle, typically red and black", "synonyms": ["ladybug", "ladybeetle", "ladybird_beetle"], "image_count": 15, "id": 623, "frequency": "c", "synset": "ladybug.n.01"}, {"name": "lamb_(animal)", "instance_count": 618, "def": "young sheep", "synonyms": ["lamb_(animal)"], "image_count": 134, "id": 624, "frequency": "f", "synset": "lamb.n.01"}, {"name": "lamb-chop", "instance_count": 8, "def": "chop cut from a lamb", "synonyms": ["lamb-chop", "lambchop"], "image_count": 4, "id": 625, "frequency": "r", "synset": "lamb_chop.n.01"}, {"name": "lamp", "instance_count": 4139, "def": "a piece of furniture holding one or more electric light bulbs", "synonyms": ["lamp"], "image_count": 1802, "id": 626, "frequency": "f", "synset": "lamp.n.02"}, {"name": "lamppost", "instance_count": 2234, "def": "a metal post supporting an outdoor lamp (such as a streetlight)", "synonyms": ["lamppost"], "image_count": 595, "id": 627, "frequency": "f", "synset": "lamppost.n.01"}, {"name": "lampshade", "instance_count": 2475, "def": "a protective ornamental shade used to screen a light bulb from direct view", "synonyms": ["lampshade"], "image_count": 1210, "id": 628, "frequency": "f", "synset": "lampshade.n.01"}, {"name": "lantern", "instance_count": 364, "def": "light in a transparent protective case", "synonyms": ["lantern"], "image_count": 48, "id": 629, "frequency": "c", "synset": "lantern.n.01"}, {"name": "lanyard", "instance_count": 1065, "def": "a cord worn around the neck to hold a knife or whistle, etc.", "synonyms": ["lanyard", "laniard"], "image_count": 418, "id": 630, "frequency": "f", "synset": "lanyard.n.02"}, {"name": "laptop_computer", "instance_count": 2852, "def": "a portable computer small enough to use in your lap", "synonyms": ["laptop_computer", "notebook_computer"], "image_count": 1846, "id": 631, "frequency": "f", "synset": "laptop.n.01"}, {"name": "lasagna", "instance_count": 7, "def": "baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables", "synonyms": ["lasagna", "lasagne"], "image_count": 5, "id": 632, "frequency": "r", "synset": "lasagna.n.01"}, {"name": "latch", "instance_count": 702, "def": "a bar that can be lowered or slid into a groove to fasten a door or gate", "synonyms": ["latch"], "image_count": 221, "id": 633, "frequency": "f", "synset": "latch.n.02"}, {"name": "lawn_mower", "instance_count": 12, "def": "garden tool for mowing grass on lawns", "synonyms": ["lawn_mower"], "image_count": 10, "id": 634, "frequency": "r", "synset": "lawn_mower.n.01"}, {"name": "leather", "instance_count": 20, "def": "an animal skin made smooth and flexible by removing the hair and then tanning", "synonyms": ["leather"], "image_count": 7, "id": 635, "frequency": "r", "synset": "leather.n.01"}, {"name": "legging_(clothing)", "instance_count": 154, "def": "a garment covering the leg (usually extending from the knee to the ankle)", "synonyms": ["legging_(clothing)", "leging_(clothing)", "leg_covering"], "image_count": 76, "id": 636, "frequency": "c", "synset": "legging.n.01"}, {"name": "Lego", "instance_count": 331, "def": "a child's plastic construction set for making models from blocks", "synonyms": ["Lego", "Lego_set"], "image_count": 22, "id": 637, "frequency": "c", "synset": "lego.n.01"}, {"name": "legume", "instance_count": 333, "def": "the fruit or seed of bean or pea plants", "synonyms": ["legume"], "image_count": 10, "id": 638, "frequency": "r", "synset": "legume.n.02"}, {"name": "lemon", "instance_count": 2168, "def": "yellow oval fruit with juicy acidic flesh", "synonyms": ["lemon"], "image_count": 341, "id": 639, "frequency": "f", "synset": "lemon.n.01"}, {"name": "lemonade", "instance_count": 2, "def": "sweetened beverage of diluted lemon juice", "synonyms": ["lemonade"], "image_count": 1, "id": 640, "frequency": "r", "synset": "lemonade.n.01"}, {"name": "lettuce", "instance_count": 5500, "def": "leafy plant commonly eaten in salad or on sandwiches", "synonyms": ["lettuce"], "image_count": 705, "id": 641, "frequency": "f", "synset": "lettuce.n.02"}, {"name": "license_plate", "instance_count": 4392, "def": "a plate mounted on the front and back of car and bearing the car's registration number", "synonyms": ["license_plate", "numberplate"], "image_count": 1900, "id": 642, "frequency": "f", "synset": "license_plate.n.01"}, {"name": "life_buoy", "instance_count": 524, "def": "a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)", "synonyms": ["life_buoy", "lifesaver", "life_belt", "life_ring"], "image_count": 188, "id": 643, "frequency": "f", "synset": "life_buoy.n.01"}, {"name": "life_jacket", "instance_count": 689, "def": "life preserver consisting of a sleeveless jacket of buoyant or inflatable design", "synonyms": ["life_jacket", "life_vest"], "image_count": 227, "id": 644, "frequency": "f", "synset": "life_jacket.n.01"}, {"name": "lightbulb", "instance_count": 7075, "def": "lightblub/source of light", "synonyms": ["lightbulb"], "image_count": 861, "id": 645, "frequency": "f", "synset": "light_bulb.n.01"}, {"name": "lightning_rod", "instance_count": 6, "def": "a metallic conductor that is attached to a high point and leads to the ground", "synonyms": ["lightning_rod", "lightning_conductor"], "image_count": 6, "id": 646, "frequency": "r", "synset": "lightning_rod.n.02"}, {"name": "lime", "instance_count": 1134, "def": "the green acidic fruit of any of various lime trees", "synonyms": ["lime"], "image_count": 115, "id": 647, "frequency": "f", "synset": "lime.n.06"}, {"name": "limousine", "instance_count": 6, "def": "long luxurious car; usually driven by a chauffeur", "synonyms": ["limousine"], "image_count": 5, "id": 648, "frequency": "r", "synset": "limousine.n.01"}, {"name": "lion", "instance_count": 69, "def": "large gregarious predatory cat of Africa and India", "synonyms": ["lion"], "image_count": 43, "id": 649, "frequency": "c", "synset": "lion.n.01"}, {"name": "lip_balm", "instance_count": 29, "def": "a balm applied to the lips", "synonyms": ["lip_balm"], "image_count": 14, "id": 650, "frequency": "c", "synset": "lip_balm.n.01"}, {"name": "liquor", "instance_count": 66, "def": "liquor or beer", "synonyms": ["liquor", "spirits", "hard_liquor", "liqueur", "cordial"], "image_count": 6, "id": 651, "frequency": "r", "synset": "liquor.n.01"}, {"name": "lizard", "instance_count": 22, "def": "a reptile with usually two pairs of legs and a tapering tail", "synonyms": ["lizard"], "image_count": 15, "id": 652, "frequency": "c", "synset": "lizard.n.01"}, {"name": "log", "instance_count": 7363, "def": "a segment of the trunk of a tree when stripped of branches", "synonyms": ["log"], "image_count": 1167, "id": 653, "frequency": "f", "synset": "log.n.01"}, {"name": "lollipop", "instance_count": 59, "def": "hard candy on a stick", "synonyms": ["lollipop"], "image_count": 15, "id": 654, "frequency": "c", "synset": "lollipop.n.02"}, {"name": "speaker_(stero_equipment)", "instance_count": 2029, "def": "electronic device that produces sound often as part of a stereo system", "synonyms": ["speaker_(stero_equipment)"], "image_count": 994, "id": 655, "frequency": "f", "synset": "loudspeaker.n.01"}, {"name": "loveseat", "instance_count": 41, "def": "small sofa that seats two people", "synonyms": ["loveseat"], "image_count": 28, "id": 656, "frequency": "c", "synset": "love_seat.n.01"}, {"name": "machine_gun", "instance_count": 5, "def": "a rapidly firing automatic gun", "synonyms": ["machine_gun"], "image_count": 2, "id": 657, "frequency": "r", "synset": "machine_gun.n.01"}, {"name": "magazine", "instance_count": 1379, "def": "a paperback periodic publication", "synonyms": ["magazine"], "image_count": 338, "id": 658, "frequency": "f", "synset": "magazine.n.02"}, {"name": "magnet", "instance_count": 5638, "def": "a device that attracts iron and produces a magnetic field", "synonyms": ["magnet"], "image_count": 334, "id": 659, "frequency": "f", "synset": "magnet.n.01"}, {"name": "mail_slot", "instance_count": 16, "def": "a slot (usually in a door) through which mail can be delivered", "synonyms": ["mail_slot"], "image_count": 15, "id": 660, "frequency": "c", "synset": "mail_slot.n.01"}, {"name": "mailbox_(at_home)", "instance_count": 240, "def": "a private box for delivery of mail", "synonyms": ["mailbox_(at_home)", "letter_box_(at_home)"], "image_count": 102, "id": 661, "frequency": "f", "synset": "mailbox.n.01"}, {"name": "mallard", "instance_count": 2, "def": "wild dabbling duck from which domestic ducks are descended", "synonyms": ["mallard"], "image_count": 1, "id": 662, "frequency": "r", "synset": "mallard.n.01"}, {"name": "mallet", "instance_count": 16, "def": "a sports implement with a long handle and a hammer-like head used to hit a ball", "synonyms": ["mallet"], "image_count": 8, "id": 663, "frequency": "r", "synset": "mallet.n.01"}, {"name": "mammoth", "instance_count": 2, "def": "any of numerous extinct elephants widely distributed in the Pleistocene", "synonyms": ["mammoth"], "image_count": 1, "id": 664, "frequency": "r", "synset": "mammoth.n.01"}, {"name": "manatee", "instance_count": 1, "def": "sirenian mammal of tropical coastal waters of America", "synonyms": ["manatee"], "image_count": 1, "id": 665, "frequency": "r", "synset": "manatee.n.01"}, {"name": "mandarin_orange", "instance_count": 401, "def": "a somewhat flat reddish-orange loose skinned citrus of China", "synonyms": ["mandarin_orange"], "image_count": 28, "id": 666, "frequency": "c", "synset": "mandarin.n.05"}, {"name": "manger", "instance_count": 126, "def": "a container (usually in a barn or stable) from which cattle or horses feed", "synonyms": ["manger", "trough"], "image_count": 91, "id": 667, "frequency": "c", "synset": "manger.n.01"}, {"name": "manhole", "instance_count": 445, "def": "a hole (usually with a flush cover) through which a person can gain access to an underground structure", "synonyms": ["manhole"], "image_count": 260, "id": 668, "frequency": "f", "synset": "manhole.n.01"}, {"name": "map", "instance_count": 186, "def": "a diagrammatic representation of the earth's surface (or part of it)", "synonyms": ["map"], "image_count": 131, "id": 669, "frequency": "f", "synset": "map.n.01"}, {"name": "marker", "instance_count": 501, "def": "a writing implement for making a mark", "synonyms": ["marker"], "image_count": 128, "id": 670, "frequency": "f", "synset": "marker.n.03"}, {"name": "martini", "instance_count": 3, "def": "a cocktail made of gin (or vodka) with dry vermouth", "synonyms": ["martini"], "image_count": 3, "id": 671, "frequency": "r", "synset": "martini.n.01"}, {"name": "mascot", "instance_count": 10, "def": "a person or animal that is adopted by a team or other group as a symbolic figure", "synonyms": ["mascot"], "image_count": 10, "id": 672, "frequency": "r", "synset": "mascot.n.01"}, {"name": "mashed_potato", "instance_count": 58, "def": "potato that has been peeled and boiled and then mashed", "synonyms": ["mashed_potato"], "image_count": 39, "id": 673, "frequency": "c", "synset": "mashed_potato.n.01"}, {"name": "masher", "instance_count": 2, "def": "a kitchen utensil used for mashing (e.g. potatoes)", "synonyms": ["masher"], "image_count": 2, "id": 674, "frequency": "r", "synset": "masher.n.02"}, {"name": "mask", "instance_count": 1595, "def": "a protective covering worn over the face", "synonyms": ["mask", "facemask"], "image_count": 925, "id": 675, "frequency": "f", "synset": "mask.n.04"}, {"name": "mast", "instance_count": 2985, "def": "a vertical spar for supporting sails", "synonyms": ["mast"], "image_count": 354, "id": 676, "frequency": "f", "synset": "mast.n.01"}, {"name": "mat_(gym_equipment)", "instance_count": 114, "def": "sports equipment consisting of a piece of thick padding on the floor for gymnastics", "synonyms": ["mat_(gym_equipment)", "gym_mat"], "image_count": 31, "id": 677, "frequency": "c", "synset": "mat.n.03"}, {"name": "matchbox", "instance_count": 11, "def": "a box for holding matches", "synonyms": ["matchbox"], "image_count": 10, "id": 678, "frequency": "r", "synset": "matchbox.n.01"}, {"name": "mattress", "instance_count": 354, "def": "a thick pad filled with resilient material used as a bed or part of a bed", "synonyms": ["mattress"], "image_count": 215, "id": 679, "frequency": "f", "synset": "mattress.n.01"}, {"name": "measuring_cup", "instance_count": 139, "def": "graduated cup used to measure liquid or granular ingredients", "synonyms": ["measuring_cup"], "image_count": 71, "id": 680, "frequency": "c", "synset": "measuring_cup.n.01"}, {"name": "measuring_stick", "instance_count": 57, "def": "measuring instrument having a sequence of marks at regular intervals", "synonyms": ["measuring_stick", "ruler_(measuring_stick)", "measuring_rod"], "image_count": 43, "id": 681, "frequency": "c", "synset": "measuring_stick.n.01"}, {"name": "meatball", "instance_count": 174, "def": "ground meat formed into a ball and fried or simmered in broth", "synonyms": ["meatball"], "image_count": 28, "id": 682, "frequency": "c", "synset": "meatball.n.01"}, {"name": "medicine", "instance_count": 243, "def": "something that treats or prevents or alleviates the symptoms of disease", "synonyms": ["medicine"], "image_count": 34, "id": 683, "frequency": "c", "synset": "medicine.n.02"}, {"name": "melon", "instance_count": 167, "def": "fruit of the gourd family having a hard rind and sweet juicy flesh", "synonyms": ["melon"], "image_count": 16, "id": 684, "frequency": "c", "synset": "melon.n.01"}, {"name": "microphone", "instance_count": 435, "def": "device for converting sound waves into electrical energy", "synonyms": ["microphone"], "image_count": 273, "id": 685, "frequency": "f", "synset": "microphone.n.01"}, {"name": "microscope", "instance_count": 3, "def": "magnifier of the image of small objects", "synonyms": ["microscope"], "image_count": 2, "id": 686, "frequency": "r", "synset": "microscope.n.01"}, {"name": "microwave_oven", "instance_count": 1105, "def": "kitchen appliance that cooks food by passing an electromagnetic wave through it", "synonyms": ["microwave_oven"], "image_count": 999, "id": 687, "frequency": "f", "synset": "microwave.n.02"}, {"name": "milestone", "instance_count": 5, "def": "stone post at side of a road to show distances", "synonyms": ["milestone", "milepost"], "image_count": 4, "id": 688, "frequency": "r", "synset": "milestone.n.01"}, {"name": "milk", "instance_count": 227, "def": "a white nutritious liquid secreted by mammals and used as food by human beings", "synonyms": ["milk"], "image_count": 107, "id": 689, "frequency": "f", "synset": "milk.n.01"}, {"name": "milk_can", "instance_count": 8, "def": "can for transporting milk", "synonyms": ["milk_can"], "image_count": 2, "id": 690, "frequency": "r", "synset": "milk_can.n.01"}, {"name": "milkshake", "instance_count": 1, "def": "frothy drink of milk and flavoring and sometimes fruit or ice cream", "synonyms": ["milkshake"], "image_count": 1, "id": 691, "frequency": "r", "synset": "milkshake.n.01"}, {"name": "minivan", "instance_count": 1046, "def": "a small box-shaped passenger van", "synonyms": ["minivan"], "image_count": 454, "id": 692, "frequency": "f", "synset": "minivan.n.01"}, {"name": "mint_candy", "instance_count": 27, "def": "a candy that is flavored with a mint oil", "synonyms": ["mint_candy"], "image_count": 9, "id": 693, "frequency": "r", "synset": "mint.n.05"}, {"name": "mirror", "instance_count": 3490, "def": "polished surface that forms images by reflecting light", "synonyms": ["mirror"], "image_count": 1901, "id": 694, "frequency": "f", "synset": "mirror.n.01"}, {"name": "mitten", "instance_count": 156, "def": "glove that encases the thumb separately and the other four fingers together", "synonyms": ["mitten"], "image_count": 61, "id": 695, "frequency": "c", "synset": "mitten.n.01"}, {"name": "mixer_(kitchen_tool)", "instance_count": 108, "def": "a kitchen utensil that is used for mixing foods", "synonyms": ["mixer_(kitchen_tool)", "stand_mixer"], "image_count": 91, "id": 696, "frequency": "c", "synset": "mixer.n.04"}, {"name": "money", "instance_count": 122, "def": "the official currency issued by a government or national bank", "synonyms": ["money"], "image_count": 46, "id": 697, "frequency": "c", "synset": "money.n.03"}, {"name": "monitor_(computer_equipment) computer_monitor", "instance_count": 2955, "def": "a computer monitor", "synonyms": ["monitor_(computer_equipment) computer_monitor"], "image_count": 1402, "id": 698, "frequency": "f", "synset": "monitor.n.04"}, {"name": "monkey", "instance_count": 166, "def": "any of various long-tailed primates", "synonyms": ["monkey"], "image_count": 74, "id": 699, "frequency": "c", "synset": "monkey.n.01"}, {"name": "motor", "instance_count": 985, "def": "machine that converts other forms of energy into mechanical energy and so imparts motion", "synonyms": ["motor"], "image_count": 421, "id": 700, "frequency": "f", "synset": "motor.n.01"}, {"name": "motor_scooter", "instance_count": 720, "def": "a wheeled vehicle with small wheels and a low-powered engine", "synonyms": ["motor_scooter", "scooter"], "image_count": 226, "id": 701, "frequency": "f", "synset": "motor_scooter.n.01"}, {"name": "motor_vehicle", "instance_count": 64, "def": "a self-propelled wheeled vehicle that does not run on rails", "synonyms": ["motor_vehicle", "automotive_vehicle"], "image_count": 10, "id": 702, "frequency": "r", "synset": "motor_vehicle.n.01"}, {"name": "motorcycle", "instance_count": 5247, "def": "a motor vehicle with two wheels and a strong frame", "synonyms": ["motorcycle"], "image_count": 1720, "id": 703, "frequency": "f", "synset": "motorcycle.n.01"}, {"name": "mound_(baseball)", "instance_count": 269, "def": "(baseball) the slight elevation on which the pitcher stands", "synonyms": ["mound_(baseball)", "pitcher's_mound"], "image_count": 261, "id": 704, "frequency": "f", "synset": "mound.n.01"}, {"name": "mouse_(computer_equipment)", "instance_count": 1832, "def": "a computer input device that controls an on-screen pointer (does not include trackpads / touchpads)", "synonyms": ["mouse_(computer_equipment)", "computer_mouse"], "image_count": 1337, "id": 705, "frequency": "f", "synset": "mouse.n.04"}, {"name": "mousepad", "instance_count": 333, "def": "a small portable pad that provides an operating surface for a computer mouse", "synonyms": ["mousepad"], "image_count": 293, "id": 706, "frequency": "f", "synset": "mousepad.n.01"}, {"name": "muffin", "instance_count": 352, "def": "a sweet quick bread baked in a cup-shaped pan", "synonyms": ["muffin"], "image_count": 62, "id": 707, "frequency": "c", "synset": "muffin.n.01"}, {"name": "mug", "instance_count": 1785, "def": "with handle and usually cylindrical", "synonyms": ["mug"], "image_count": 814, "id": 708, "frequency": "f", "synset": "mug.n.04"}, {"name": "mushroom", "instance_count": 6257, "def": "a common mushroom", "synonyms": ["mushroom"], "image_count": 407, "id": 709, "frequency": "f", "synset": "mushroom.n.02"}, {"name": "music_stool", "instance_count": 6, "def": "a stool for piano players; usually adjustable in height", "synonyms": ["music_stool", "piano_stool"], "image_count": 6, "id": 710, "frequency": "r", "synset": "music_stool.n.01"}, {"name": "musical_instrument", "instance_count": 33, "def": "any of various devices or contrivances that can be used to produce musical tones or sounds", "synonyms": ["musical_instrument", "instrument_(musical)"], "image_count": 16, "id": 711, "frequency": "c", "synset": "musical_instrument.n.01"}, {"name": "nailfile", "instance_count": 10, "def": "a small flat file for shaping the nails", "synonyms": ["nailfile"], "image_count": 7, "id": 712, "frequency": "r", "synset": "nailfile.n.01"}, {"name": "napkin", "instance_count": 3979, "def": "a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing", "synonyms": ["napkin", "table_napkin", "serviette"], "image_count": 1791, "id": 713, "frequency": "f", "synset": "napkin.n.01"}, {"name": "neckerchief", "instance_count": 4, "def": "a kerchief worn around the neck", "synonyms": ["neckerchief"], "image_count": 2, "id": 714, "frequency": "r", "synset": "neckerchief.n.01"}, {"name": "necklace", "instance_count": 2709, "def": "jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament", "synonyms": ["necklace"], "image_count": 1915, "id": 715, "frequency": "f", "synset": "necklace.n.01"}, {"name": "necktie", "instance_count": 4069, "def": "neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front", "synonyms": ["necktie", "tie_(necktie)"], "image_count": 1940, "id": 716, "frequency": "f", "synset": "necktie.n.01"}, {"name": "needle", "instance_count": 61, "def": "a sharp pointed implement (usually metal)", "synonyms": ["needle"], "image_count": 13, "id": 717, "frequency": "c", "synset": "needle.n.03"}, {"name": "nest", "instance_count": 20, "def": "a structure in which animals lay eggs or give birth to their young", "synonyms": ["nest"], "image_count": 16, "id": 718, "frequency": "c", "synset": "nest.n.01"}, {"name": "newspaper", "instance_count": 1179, "def": "a daily or weekly publication on folded sheets containing news, articles, and advertisements", "synonyms": ["newspaper", "paper_(newspaper)"], "image_count": 448, "id": 719, "frequency": "f", "synset": "newspaper.n.01"}, {"name": "newsstand", "instance_count": 39, "def": "a stall where newspapers and other periodicals are sold", "synonyms": ["newsstand"], "image_count": 12, "id": 720, "frequency": "c", "synset": "newsstand.n.01"}, {"name": "nightshirt", "instance_count": 35, "def": "garments designed to be worn in bed", "synonyms": ["nightshirt", "nightwear", "sleepwear", "nightclothes"], "image_count": 18, "id": 721, "frequency": "c", "synset": "nightwear.n.01"}, {"name": "nosebag_(for_animals)", "instance_count": 4, "def": "a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head", "synonyms": ["nosebag_(for_animals)", "feedbag"], "image_count": 4, "id": 722, "frequency": "r", "synset": "nosebag.n.01"}, {"name": "noseband_(for_animals)", "instance_count": 120, "def": "a strap that is the part of a bridle that goes over the animal's nose", "synonyms": ["noseband_(for_animals)", "nosepiece_(for_animals)"], "image_count": 71, "id": 723, "frequency": "c", "synset": "noseband.n.01"}, {"name": "notebook", "instance_count": 290, "def": "a book with blank pages for recording notes or memoranda", "synonyms": ["notebook"], "image_count": 189, "id": 724, "frequency": "f", "synset": "notebook.n.01"}, {"name": "notepad", "instance_count": 187, "def": "a pad of paper for keeping notes", "synonyms": ["notepad"], "image_count": 74, "id": 725, "frequency": "c", "synset": "notepad.n.01"}, {"name": "nut", "instance_count": 790, "def": "a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt", "synonyms": ["nut"], "image_count": 103, "id": 726, "frequency": "f", "synset": "nut.n.03"}, {"name": "nutcracker", "instance_count": 7, "def": "a hand tool used to crack nuts open", "synonyms": ["nutcracker"], "image_count": 3, "id": 727, "frequency": "r", "synset": "nutcracker.n.01"}, {"name": "oar", "instance_count": 488, "def": "an implement used to propel or steer a boat", "synonyms": ["oar"], "image_count": 110, "id": 728, "frequency": "f", "synset": "oar.n.01"}, {"name": "octopus_(food)", "instance_count": 5, "def": "tentacles of octopus prepared as food", "synonyms": ["octopus_(food)"], "image_count": 5, "id": 729, "frequency": "r", "synset": "octopus.n.01"}, {"name": "octopus_(animal)", "instance_count": 17, "def": "bottom-living cephalopod having a soft oval body with eight long tentacles", "synonyms": ["octopus_(animal)"], "image_count": 9, "id": 730, "frequency": "r", "synset": "octopus.n.02"}, {"name": "oil_lamp", "instance_count": 28, "def": "a lamp that burns oil (as kerosine) for light", "synonyms": ["oil_lamp", "kerosene_lamp", "kerosine_lamp"], "image_count": 15, "id": 731, "frequency": "c", "synset": "oil_lamp.n.01"}, {"name": "olive_oil", "instance_count": 36, "def": "oil from olives", "synonyms": ["olive_oil"], "image_count": 25, "id": 732, "frequency": "c", "synset": "olive_oil.n.01"}, {"name": "omelet", "instance_count": 10, "def": "beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly", "synonyms": ["omelet", "omelette"], "image_count": 7, "id": 733, "frequency": "r", "synset": "omelet.n.01"}, {"name": "onion", "instance_count": 9779, "def": "the bulb of an onion plant", "synonyms": ["onion"], "image_count": 647, "id": 734, "frequency": "f", "synset": "onion.n.01"}, {"name": "orange_(fruit)", "instance_count": 13034, "def": "orange (FRUIT of an orange tree)", "synonyms": ["orange_(fruit)"], "image_count": 824, "id": 735, "frequency": "f", "synset": "orange.n.01"}, {"name": "orange_juice", "instance_count": 223, "def": "bottled or freshly squeezed juice of oranges", "synonyms": ["orange_juice"], "image_count": 100, "id": 736, "frequency": "c", "synset": "orange_juice.n.01"}, {"name": "ostrich", "instance_count": 71, "def": "fast-running African flightless bird with two-toed feet; largest living bird", "synonyms": ["ostrich"], "image_count": 47, "id": 737, "frequency": "c", "synset": "ostrich.n.02"}, {"name": "ottoman", "instance_count": 157, "def": "a thick standalone cushion used as a seat or footrest, often next to a chair", "synonyms": ["ottoman", "pouf", "pouffe", "hassock"], "image_count": 121, "id": 738, "frequency": "f", "synset": "ottoman.n.03"}, {"name": "oven", "instance_count": 929, "def": "kitchen appliance used for baking or roasting", "synonyms": ["oven"], "image_count": 731, "id": 739, "frequency": "f", "synset": "oven.n.01"}, {"name": "overalls_(clothing)", "instance_count": 76, "def": "work clothing consisting of denim trousers usually with a bib and shoulder straps", "synonyms": ["overalls_(clothing)"], "image_count": 73, "id": 740, "frequency": "c", "synset": "overall.n.01"}, {"name": "owl", "instance_count": 73, "def": "nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes", "synonyms": ["owl"], "image_count": 49, "id": 741, "frequency": "c", "synset": "owl.n.01"}, {"name": "packet", "instance_count": 109, "def": "a small package or bundle", "synonyms": ["packet"], "image_count": 23, "id": 742, "frequency": "c", "synset": "packet.n.03"}, {"name": "inkpad", "instance_count": 12, "def": "absorbent material saturated with ink used to transfer ink evenly to a rubber stamp", "synonyms": ["inkpad", "inking_pad", "stamp_pad"], "image_count": 4, "id": 743, "frequency": "r", "synset": "pad.n.03"}, {"name": "pad", "instance_count": 264, "def": "mostly arm/knee pads labeled", "synonyms": ["pad"], "image_count": 62, "id": 744, "frequency": "c", "synset": "pad.n.04"}, {"name": "paddle", "instance_count": 306, "def": "a short light oar used without an oarlock to propel a canoe or small boat", "synonyms": ["paddle", "boat_paddle"], "image_count": 118, "id": 745, "frequency": "f", "synset": "paddle.n.04"}, {"name": "padlock", "instance_count": 184, "def": "a detachable, portable lock", "synonyms": ["padlock"], "image_count": 99, "id": 746, "frequency": "c", "synset": "padlock.n.01"}, {"name": "paintbrush", "instance_count": 91, "def": "a brush used as an applicator to apply paint", "synonyms": ["paintbrush"], "image_count": 40, "id": 747, "frequency": "c", "synset": "paintbrush.n.01"}, {"name": "painting", "instance_count": 2645, "def": "graphic art consisting of an artistic composition made by applying paints to a surface", "synonyms": ["painting"], "image_count": 1036, "id": 748, "frequency": "f", "synset": "painting.n.01"}, {"name": "pajamas", "instance_count": 163, "def": "loose-fitting nightclothes worn for sleeping or lounging", "synonyms": ["pajamas", "pyjamas"], "image_count": 105, "id": 749, "frequency": "f", "synset": "pajama.n.02"}, {"name": "palette", "instance_count": 68, "def": "board that provides a flat surface on which artists mix paints and the range of colors used", "synonyms": ["palette", "pallet"], "image_count": 21, "id": 750, "frequency": "c", "synset": "palette.n.02"}, {"name": "pan_(for_cooking)", "instance_count": 643, "def": "cooking utensil consisting of a wide metal vessel", "synonyms": ["pan_(for_cooking)", "cooking_pan"], "image_count": 229, "id": 751, "frequency": "f", "synset": "pan.n.01"}, {"name": "pan_(metal_container)", "instance_count": 21, "def": "shallow container made of metal", "synonyms": ["pan_(metal_container)"], "image_count": 7, "id": 752, "frequency": "r", "synset": "pan.n.03"}, {"name": "pancake", "instance_count": 295, "def": "a flat cake of thin batter fried on both sides on a griddle", "synonyms": ["pancake"], "image_count": 72, "id": 753, "frequency": "c", "synset": "pancake.n.01"}, {"name": "pantyhose", "instance_count": 11, "def": "a woman's tights consisting of underpants and stockings", "synonyms": ["pantyhose"], "image_count": 9, "id": 754, "frequency": "r", "synset": "pantyhose.n.01"}, {"name": "papaya", "instance_count": 206, "def": "large oval melon-like tropical fruit with yellowish flesh", "synonyms": ["papaya"], "image_count": 10, "id": 755, "frequency": "r", "synset": "papaya.n.02"}, {"name": "paper_plate", "instance_count": 957, "def": "a disposable plate made of cardboard", "synonyms": ["paper_plate"], "image_count": 328, "id": 756, "frequency": "f", "synset": "paper_plate.n.01"}, {"name": "paper_towel", "instance_count": 600, "def": "a disposable towel made of absorbent paper", "synonyms": ["paper_towel"], "image_count": 468, "id": 757, "frequency": "f", "synset": "paper_towel.n.01"}, {"name": "paperback_book", "instance_count": 3, "def": "a book with paper covers", "synonyms": ["paperback_book", "paper-back_book", "softback_book", "soft-cover_book"], "image_count": 1, "id": 758, "frequency": "r", "synset": "paperback_book.n.01"}, {"name": "paperweight", "instance_count": 4, "def": "a weight used to hold down a stack of papers", "synonyms": ["paperweight"], "image_count": 2, "id": 759, "frequency": "r", "synset": "paperweight.n.01"}, {"name": "parachute", "instance_count": 61, "def": "rescue equipment consisting of a device that fills with air and retards your fall", "synonyms": ["parachute"], "image_count": 24, "id": 760, "frequency": "c", "synset": "parachute.n.01"}, {"name": "parakeet", "instance_count": 46, "def": "any of numerous small slender long-tailed parrots", "synonyms": ["parakeet", "parrakeet", "parroket", "paraquet", "paroquet", "parroquet"], "image_count": 11, "id": 761, "frequency": "c", "synset": "parakeet.n.01"}, {"name": "parasail_(sports)", "instance_count": 385, "def": "parachute that will lift a person up into the air when it is towed by a motorboat or a car", "synonyms": ["parasail_(sports)"], "image_count": 72, "id": 762, "frequency": "c", "synset": "parasail.n.01"}, {"name": "parasol", "instance_count": 45, "def": "a handheld collapsible source of shade", "synonyms": ["parasol", "sunshade"], "image_count": 17, "id": 763, "frequency": "c", "synset": "parasol.n.01"}, {"name": "parchment", "instance_count": 17, "def": "a superior paper resembling sheepskin", "synonyms": ["parchment"], "image_count": 10, "id": 764, "frequency": "r", "synset": "parchment.n.01"}, {"name": "parka", "instance_count": 89, "def": "a kind of heavy jacket (`windcheater' is a British term)", "synonyms": ["parka", "anorak"], "image_count": 17, "id": 765, "frequency": "c", "synset": "parka.n.01"}, {"name": "parking_meter", "instance_count": 1075, "def": "a coin-operated timer located next to a parking space", "synonyms": ["parking_meter"], "image_count": 489, "id": 766, "frequency": "f", "synset": "parking_meter.n.01"}, {"name": "parrot", "instance_count": 76, "def": "usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds", "synonyms": ["parrot"], "image_count": 47, "id": 767, "frequency": "c", "synset": "parrot.n.01"}, {"name": "passenger_car_(part_of_a_train)", "instance_count": 465, "def": "a railcar where passengers ride", "synonyms": ["passenger_car_(part_of_a_train)", "coach_(part_of_a_train)"], "image_count": 93, "id": 768, "frequency": "c", "synset": "passenger_car.n.01"}, {"name": "passenger_ship", "instance_count": 1, "def": "a ship built to carry passengers", "synonyms": ["passenger_ship"], "image_count": 1, "id": 769, "frequency": "r", "synset": "passenger_ship.n.01"}, {"name": "passport", "instance_count": 12, "def": "a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country", "synonyms": ["passport"], "image_count": 12, "id": 770, "frequency": "c", "synset": "passport.n.02"}, {"name": "pastry", "instance_count": 4972, "def": "any of various baked foods made of dough or batter", "synonyms": ["pastry"], "image_count": 228, "id": 771, "frequency": "f", "synset": "pastry.n.02"}, {"name": "patty_(food)", "instance_count": 20, "def": "small flat mass of chopped food", "synonyms": ["patty_(food)"], "image_count": 5, "id": 772, "frequency": "r", "synset": "patty.n.01"}, {"name": "pea_(food)", "instance_count": 1869, "def": "seed of a pea plant used for food", "synonyms": ["pea_(food)"], "image_count": 76, "id": 773, "frequency": "c", "synset": "pea.n.01"}, {"name": "peach", "instance_count": 1041, "def": "downy juicy fruit with sweet yellowish or whitish flesh", "synonyms": ["peach"], "image_count": 71, "id": 774, "frequency": "c", "synset": "peach.n.03"}, {"name": "peanut_butter", "instance_count": 50, "def": "a spread made from ground peanuts", "synonyms": ["peanut_butter"], "image_count": 30, "id": 775, "frequency": "c", "synset": "peanut_butter.n.01"}, {"name": "pear", "instance_count": 1069, "def": "sweet juicy gritty-textured fruit available in many varieties", "synonyms": ["pear"], "image_count": 109, "id": 776, "frequency": "f", "synset": "pear.n.01"}, {"name": "peeler_(tool_for_fruit_and_vegetables)", "instance_count": 18, "def": "a device for peeling vegetables or fruits", "synonyms": ["peeler_(tool_for_fruit_and_vegetables)"], "image_count": 14, "id": 777, "frequency": "c", "synset": "peeler.n.03"}, {"name": "wooden_leg", "instance_count": 1, "def": "a prosthesis that replaces a missing leg", "synonyms": ["wooden_leg", "pegleg"], "image_count": 1, "id": 778, "frequency": "r", "synset": "peg.n.04"}, {"name": "pegboard", "instance_count": 9, "def": "a board perforated with regularly spaced holes into which pegs can be fitted", "synonyms": ["pegboard"], "image_count": 8, "id": 779, "frequency": "r", "synset": "pegboard.n.01"}, {"name": "pelican", "instance_count": 76, "def": "large long-winged warm-water seabird having a large bill with a distensible pouch for fish", "synonyms": ["pelican"], "image_count": 26, "id": 780, "frequency": "c", "synset": "pelican.n.01"}, {"name": "pen", "instance_count": 987, "def": "a writing implement with a point from which ink flows", "synonyms": ["pen"], "image_count": 339, "id": 781, "frequency": "f", "synset": "pen.n.01"}, {"name": "pencil", "instance_count": 543, "def": "a thin cylindrical pointed writing implement made of wood and graphite", "synonyms": ["pencil"], "image_count": 153, "id": 782, "frequency": "f", "synset": "pencil.n.01"}, {"name": "pencil_box", "instance_count": 2, "def": "a box for holding pencils", "synonyms": ["pencil_box", "pencil_case"], "image_count": 2, "id": 783, "frequency": "r", "synset": "pencil_box.n.01"}, {"name": "pencil_sharpener", "instance_count": 4, "def": "a rotary implement for sharpening the point on pencils", "synonyms": ["pencil_sharpener"], "image_count": 3, "id": 784, "frequency": "r", "synset": "pencil_sharpener.n.01"}, {"name": "pendulum", "instance_count": 18, "def": "an apparatus consisting of an object mounted so that it swings freely under the influence of gravity", "synonyms": ["pendulum"], "image_count": 8, "id": 785, "frequency": "r", "synset": "pendulum.n.01"}, {"name": "penguin", "instance_count": 229, "def": "short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers", "synonyms": ["penguin"], "image_count": 47, "id": 786, "frequency": "c", "synset": "penguin.n.01"}, {"name": "pennant", "instance_count": 235, "def": "a flag longer than it is wide (and often tapering)", "synonyms": ["pennant"], "image_count": 8, "id": 787, "frequency": "r", "synset": "pennant.n.02"}, {"name": "penny_(coin)", "instance_count": 15, "def": "a coin worth one-hundredth of the value of the basic unit", "synonyms": ["penny_(coin)"], "image_count": 6, "id": 788, "frequency": "r", "synset": "penny.n.02"}, {"name": "pepper", "instance_count": 697, "def": "pungent seasoning from the berry of the common pepper plant; whole or ground", "synonyms": ["pepper", "peppercorn"], "image_count": 116, "id": 789, "frequency": "f", "synset": "pepper.n.03"}, {"name": "pepper_mill", "instance_count": 91, "def": "a mill for grinding pepper", "synonyms": ["pepper_mill", "pepper_grinder"], "image_count": 69, "id": 790, "frequency": "c", "synset": "pepper_mill.n.01"}, {"name": "perfume", "instance_count": 28, "def": "a toiletry that emits and diffuses a fragrant odor", "synonyms": ["perfume"], "image_count": 13, "id": 791, "frequency": "c", "synset": "perfume.n.02"}, {"name": "persimmon", "instance_count": 22, "def": "orange fruit resembling a plum; edible when fully ripe", "synonyms": ["persimmon"], "image_count": 6, "id": 792, "frequency": "r", "synset": "persimmon.n.02"}, {"name": "person", "instance_count": 13439, "def": "a human being", "synonyms": ["person", "baby", "child", "boy", "girl", "man", "woman", "human"], "image_count": 1928, "id": 793, "frequency": "f", "synset": "person.n.01"}, {"name": "pet", "instance_count": 103, "def": "a domesticated animal kept for companionship or amusement", "synonyms": ["pet"], "image_count": 79, "id": 794, "frequency": "c", "synset": "pet.n.01"}, {"name": "pew_(church_bench)", "instance_count": 194, "def": "long bench with backs; used in church by the congregation", "synonyms": ["pew_(church_bench)", "church_bench"], "image_count": 14, "id": 795, "frequency": "c", "synset": "pew.n.01"}, {"name": "phonebook", "instance_count": 24, "def": "a directory containing an alphabetical list of telephone subscribers and their telephone numbers", "synonyms": ["phonebook", "telephone_book", "telephone_directory"], "image_count": 7, "id": 796, "frequency": "r", "synset": "phonebook.n.01"}, {"name": "phonograph_record", "instance_count": 138, "def": "sound recording consisting of a typically black disk with a continuous groove", "synonyms": ["phonograph_record", "phonograph_recording", "record_(phonograph_recording)"], "image_count": 20, "id": 797, "frequency": "c", "synset": "phonograph_record.n.01"}, {"name": "piano", "instance_count": 126, "def": "a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds", "synonyms": ["piano"], "image_count": 114, "id": 798, "frequency": "f", "synset": "piano.n.01"}, {"name": "pickle", "instance_count": 632, "def": "vegetables (especially cucumbers) preserved in brine or vinegar", "synonyms": ["pickle"], "image_count": 221, "id": 799, "frequency": "f", "synset": "pickle.n.01"}, {"name": "pickup_truck", "instance_count": 838, "def": "a light truck with an open body and low sides and a tailboard", "synonyms": ["pickup_truck"], "image_count": 502, "id": 800, "frequency": "f", "synset": "pickup.n.01"}, {"name": "pie", "instance_count": 228, "def": "dish baked in pastry-lined pan often with a pastry top", "synonyms": ["pie"], "image_count": 62, "id": 801, "frequency": "c", "synset": "pie.n.01"}, {"name": "pigeon", "instance_count": 1850, "def": "wild and domesticated birds having a heavy body and short legs", "synonyms": ["pigeon"], "image_count": 87, "id": 802, "frequency": "c", "synset": "pigeon.n.01"}, {"name": "piggy_bank", "instance_count": 5, "def": "a child's coin bank (often shaped like a pig)", "synonyms": ["piggy_bank", "penny_bank"], "image_count": 4, "id": 803, "frequency": "r", "synset": "piggy_bank.n.01"}, {"name": "pillow", "instance_count": 6115, "def": "a cushion to support the head of a sleeping person", "synonyms": ["pillow"], "image_count": 1912, "id": 804, "frequency": "f", "synset": "pillow.n.01"}, {"name": "pin_(non_jewelry)", "instance_count": 112, "def": "a small slender (often pointed) piece of wood or metal used to support or fasten or attach things", "synonyms": ["pin_(non_jewelry)"], "image_count": 7, "id": 805, "frequency": "r", "synset": "pin.n.09"}, {"name": "pineapple", "instance_count": 1636, "def": "large sweet fleshy tropical fruit with a tuft of stiff leaves", "synonyms": ["pineapple"], "image_count": 186, "id": 806, "frequency": "f", "synset": "pineapple.n.02"}, {"name": "pinecone", "instance_count": 141, "def": "the seed-producing cone of a pine tree", "synonyms": ["pinecone"], "image_count": 18, "id": 807, "frequency": "c", "synset": "pinecone.n.01"}, {"name": "ping-pong_ball", "instance_count": 4, "def": "light hollow ball used in playing table tennis", "synonyms": ["ping-pong_ball"], "image_count": 4, "id": 808, "frequency": "r", "synset": "ping-pong_ball.n.01"}, {"name": "pinwheel", "instance_count": 172, "def": "a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind", "synonyms": ["pinwheel"], "image_count": 3, "id": 809, "frequency": "r", "synset": "pinwheel.n.03"}, {"name": "tobacco_pipe", "instance_count": 7, "def": "a tube with a small bowl at one end; used for smoking tobacco", "synonyms": ["tobacco_pipe"], "image_count": 7, "id": 810, "frequency": "r", "synset": "pipe.n.01"}, {"name": "pipe", "instance_count": 4762, "def": "a long tube made of metal or plastic that is used to carry water or oil or gas etc.", "synonyms": ["pipe", "piping"], "image_count": 1413, "id": 811, "frequency": "f", "synset": "pipe.n.02"}, {"name": "pistol", "instance_count": 9, "def": "a firearm that is held and fired with one hand", "synonyms": ["pistol", "handgun"], "image_count": 7, "id": 812, "frequency": "r", "synset": "pistol.n.01"}, {"name": "pita_(bread)", "instance_count": 28, "def": "usually small round bread that can open into a pocket for filling", "synonyms": ["pita_(bread)", "pocket_bread"], "image_count": 12, "id": 813, "frequency": "c", "synset": "pita.n.01"}, {"name": "pitcher_(vessel_for_liquid)", "instance_count": 488, "def": "an open vessel with a handle and a spout for pouring", "synonyms": ["pitcher_(vessel_for_liquid)", "ewer"], "image_count": 248, "id": 814, "frequency": "f", "synset": "pitcher.n.02"}, {"name": "pitchfork", "instance_count": 4, "def": "a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay", "synonyms": ["pitchfork"], "image_count": 4, "id": 815, "frequency": "r", "synset": "pitchfork.n.01"}, {"name": "pizza", "instance_count": 4103, "def": "Italian open pie made of thin bread dough spread with a spiced mixture of e.g. tomato sauce and cheese", "synonyms": ["pizza"], "image_count": 1881, "id": 816, "frequency": "f", "synset": "pizza.n.01"}, {"name": "place_mat", "instance_count": 1123, "def": "a mat placed on a table for an individual place setting", "synonyms": ["place_mat"], "image_count": 529, "id": 817, "frequency": "f", "synset": "place_mat.n.01"}, {"name": "plate", "instance_count": 5214, "def": "dish on which food is served or from which food is eaten", "synonyms": ["plate"], "image_count": 1932, "id": 818, "frequency": "f", "synset": "plate.n.04"}, {"name": "platter", "instance_count": 148, "def": "a large shallow dish used for serving food", "synonyms": ["platter"], "image_count": 50, "id": 819, "frequency": "c", "synset": "platter.n.01"}, {"name": "playpen", "instance_count": 3, "def": "a portable enclosure in which babies may be left to play", "synonyms": ["playpen"], "image_count": 3, "id": 820, "frequency": "r", "synset": "playpen.n.01"}, {"name": "pliers", "instance_count": 49, "def": "a gripping hand tool with two hinged arms and (usually) serrated jaws", "synonyms": ["pliers", "plyers"], "image_count": 28, "id": 821, "frequency": "c", "synset": "pliers.n.01"}, {"name": "plow_(farm_equipment)", "instance_count": 12, "def": "a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing", "synonyms": ["plow_(farm_equipment)", "plough_(farm_equipment)"], "image_count": 10, "id": 822, "frequency": "r", "synset": "plow.n.01"}, {"name": "plume", "instance_count": 11, "def": "a feather or cluster of feathers worn as an ornament", "synonyms": ["plume"], "image_count": 5, "id": 823, "frequency": "r", "synset": "plume.n.02"}, {"name": "pocket_watch", "instance_count": 20, "def": "a watch that is carried in a small watch pocket", "synonyms": ["pocket_watch"], "image_count": 5, "id": 824, "frequency": "r", "synset": "pocket_watch.n.01"}, {"name": "pocketknife", "instance_count": 21, "def": "a knife with a blade that folds into the handle; suitable for carrying in the pocket", "synonyms": ["pocketknife"], "image_count": 18, "id": 825, "frequency": "c", "synset": "pocketknife.n.01"}, {"name": "poker_(fire_stirring_tool)", "instance_count": 34, "def": "fire iron consisting of a metal rod with a handle; used to stir a fire", "synonyms": ["poker_(fire_stirring_tool)", "stove_poker", "fire_hook"], "image_count": 14, "id": 826, "frequency": "c", "synset": "poker.n.01"}, {"name": "pole", "instance_count": 14276, "def": "a long (usually round) rod of wood or metal or plastic", "synonyms": ["pole", "post"], "image_count": 1890, "id": 827, "frequency": "f", "synset": "pole.n.01"}, {"name": "polo_shirt", "instance_count": 1695, "def": "a shirt with short sleeves designed for comfort and casual wear", "synonyms": ["polo_shirt", "sport_shirt"], "image_count": 660, "id": 828, "frequency": "f", "synset": "polo_shirt.n.01"}, {"name": "poncho", "instance_count": 14, "def": "a blanket-like cloak with a hole in the center for the head", "synonyms": ["poncho"], "image_count": 8, "id": 829, "frequency": "r", "synset": "poncho.n.01"}, {"name": "pony", "instance_count": 57, "def": "any of various breeds of small gentle horses usually less than five feet high at the shoulder", "synonyms": ["pony"], "image_count": 25, "id": 830, "frequency": "c", "synset": "pony.n.05"}, {"name": "pool_table", "instance_count": 10, "def": "game equipment consisting of a heavy table on which pool is played", "synonyms": ["pool_table", "billiard_table", "snooker_table"], "image_count": 10, "id": 831, "frequency": "r", "synset": "pool_table.n.01"}, {"name": "pop_(soda)", "instance_count": 951, "def": "a sweet drink containing carbonated water and flavoring", "synonyms": ["pop_(soda)", "soda_(pop)", "tonic", "soft_drink"], "image_count": 218, "id": 832, "frequency": "f", "synset": "pop.n.02"}, {"name": "postbox_(public)", "instance_count": 57, "def": "public box for deposit of mail", "synonyms": ["postbox_(public)", "mailbox_(public)"], "image_count": 36, "id": 833, "frequency": "c", "synset": "postbox.n.01"}, {"name": "postcard", "instance_count": 276, "def": "a card for sending messages by post without an envelope", "synonyms": ["postcard", "postal_card", "mailing-card"], "image_count": 16, "id": 834, "frequency": "c", "synset": "postcard.n.01"}, {"name": "poster", "instance_count": 3378, "def": "a sign posted in a public place as an advertisement", "synonyms": ["poster", "placard"], "image_count": 808, "id": 835, "frequency": "f", "synset": "poster.n.01"}, {"name": "pot", "instance_count": 1719, "def": "metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid", "synonyms": ["pot"], "image_count": 479, "id": 836, "frequency": "f", "synset": "pot.n.01"}, {"name": "flowerpot", "instance_count": 3902, "def": "a container in which plants are cultivated", "synonyms": ["flowerpot"], "image_count": 1404, "id": 837, "frequency": "f", "synset": "pot.n.04"}, {"name": "potato", "instance_count": 4393, "def": "an edible tuber native to South America", "synonyms": ["potato"], "image_count": 307, "id": 838, "frequency": "f", "synset": "potato.n.01"}, {"name": "potholder", "instance_count": 112, "def": "an insulated pad for holding hot pots", "synonyms": ["potholder"], "image_count": 57, "id": 839, "frequency": "c", "synset": "potholder.n.01"}, {"name": "pottery", "instance_count": 272, "def": "ceramic ware made from clay and baked in a kiln", "synonyms": ["pottery", "clayware"], "image_count": 28, "id": 840, "frequency": "c", "synset": "pottery.n.01"}, {"name": "pouch", "instance_count": 131, "def": "a small or medium size container for holding or carrying things", "synonyms": ["pouch"], "image_count": 80, "id": 841, "frequency": "c", "synset": "pouch.n.01"}, {"name": "power_shovel", "instance_count": 16, "def": "a machine for excavating", "synonyms": ["power_shovel", "excavator", "digger"], "image_count": 11, "id": 842, "frequency": "c", "synset": "power_shovel.n.01"}, {"name": "prawn", "instance_count": 779, "def": "any of various edible decapod crustaceans", "synonyms": ["prawn", "shrimp"], "image_count": 92, "id": 843, "frequency": "c", "synset": "prawn.n.01"}, {"name": "pretzel", "instance_count": 179, "def": "glazed and salted cracker typically in the shape of a loose knot", "synonyms": ["pretzel"], "image_count": 20, "id": 844, "frequency": "c", "synset": "pretzel.n.01"}, {"name": "printer", "instance_count": 217, "def": "a machine that prints", "synonyms": ["printer", "printing_machine"], "image_count": 194, "id": 845, "frequency": "f", "synset": "printer.n.03"}, {"name": "projectile_(weapon)", "instance_count": 64, "def": "a weapon that is forcibly thrown or projected at a targets", "synonyms": ["projectile_(weapon)", "missile"], "image_count": 23, "id": 846, "frequency": "c", "synset": "projectile.n.01"}, {"name": "projector", "instance_count": 54, "def": "an optical instrument that projects an enlarged image onto a screen", "synonyms": ["projector"], "image_count": 52, "id": 847, "frequency": "c", "synset": "projector.n.02"}, {"name": "propeller", "instance_count": 1458, "def": "a mechanical device that rotates to push against air or water", "synonyms": ["propeller", "propellor"], "image_count": 673, "id": 848, "frequency": "f", "synset": "propeller.n.01"}, {"name": "prune", "instance_count": 8, "def": "dried plum", "synonyms": ["prune"], "image_count": 2, "id": 849, "frequency": "r", "synset": "prune.n.01"}, {"name": "pudding", "instance_count": 2, "def": "any of various soft thick unsweetened baked dishes", "synonyms": ["pudding"], "image_count": 2, "id": 850, "frequency": "r", "synset": "pudding.n.01"}, {"name": "puffer_(fish)", "instance_count": 2, "def": "fishes whose elongated spiny body can inflate itself with water or air to form a globe", "synonyms": ["puffer_(fish)", "pufferfish", "blowfish", "globefish"], "image_count": 1, "id": 851, "frequency": "r", "synset": "puffer.n.02"}, {"name": "puffin", "instance_count": 4, "def": "seabirds having short necks and brightly colored compressed bills", "synonyms": ["puffin"], "image_count": 2, "id": 852, "frequency": "r", "synset": "puffin.n.01"}, {"name": "pug-dog", "instance_count": 13, "def": "small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle", "synonyms": ["pug-dog"], "image_count": 8, "id": 853, "frequency": "r", "synset": "pug.n.01"}, {"name": "pumpkin", "instance_count": 1192, "def": "usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn", "synonyms": ["pumpkin"], "image_count": 80, "id": 854, "frequency": "c", "synset": "pumpkin.n.02"}, {"name": "puncher", "instance_count": 6, "def": "a tool for making holes or indentations", "synonyms": ["puncher"], "image_count": 3, "id": 855, "frequency": "r", "synset": "punch.n.03"}, {"name": "puppet", "instance_count": 18, "def": "a small figure of a person operated from above with strings by a puppeteer", "synonyms": ["puppet", "marionette"], "image_count": 3, "id": 856, "frequency": "r", "synset": "puppet.n.01"}, {"name": "puppy", "instance_count": 57, "def": "a young dog", "synonyms": ["puppy"], "image_count": 15, "id": 857, "frequency": "c", "synset": "puppy.n.01"}, {"name": "quesadilla", "instance_count": 6, "def": "a tortilla that is filled with cheese and heated", "synonyms": ["quesadilla"], "image_count": 2, "id": 858, "frequency": "r", "synset": "quesadilla.n.01"}, {"name": "quiche", "instance_count": 33, "def": "a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)", "synonyms": ["quiche"], "image_count": 10, "id": 859, "frequency": "r", "synset": "quiche.n.02"}, {"name": "quilt", "instance_count": 513, "def": "bedding made of two layers of cloth filled with stuffing and stitched together", "synonyms": ["quilt", "comforter"], "image_count": 386, "id": 860, "frequency": "f", "synset": "quilt.n.01"}, {"name": "rabbit", "instance_count": 139, "def": "any of various burrowing animals of the family Leporidae having long ears and short tails", "synonyms": ["rabbit"], "image_count": 65, "id": 861, "frequency": "c", "synset": "rabbit.n.01"}, {"name": "race_car", "instance_count": 6, "def": "a fast car that competes in races", "synonyms": ["race_car", "racing_car"], "image_count": 3, "id": 862, "frequency": "r", "synset": "racer.n.02"}, {"name": "racket", "instance_count": 64, "def": "a sports implement used to strike a ball in various games", "synonyms": ["racket", "racquet"], "image_count": 35, "id": 863, "frequency": "c", "synset": "racket.n.04"}, {"name": "radar", "instance_count": 13, "def": "measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects", "synonyms": ["radar"], "image_count": 5, "id": 864, "frequency": "r", "synset": "radar.n.01"}, {"name": "radiator", "instance_count": 195, "def": "a mechanism consisting of a metal honeycomb through which hot fluids circulate", "synonyms": ["radiator"], "image_count": 180, "id": 865, "frequency": "f", "synset": "radiator.n.03"}, {"name": "radio_receiver", "instance_count": 123, "def": "an electronic receiver that detects and demodulates and amplifies transmitted radio signals", "synonyms": ["radio_receiver", "radio_set", "radio", "tuner_(radio)"], "image_count": 99, "id": 866, "frequency": "c", "synset": "radio_receiver.n.01"}, {"name": "radish", "instance_count": 519, "def": "pungent edible root of any of various cultivated radish plants", "synonyms": ["radish", "daikon"], "image_count": 49, "id": 867, "frequency": "c", "synset": "radish.n.03"}, {"name": "raft", "instance_count": 66, "def": "a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers", "synonyms": ["raft"], "image_count": 28, "id": 868, "frequency": "c", "synset": "raft.n.01"}, {"name": "rag_doll", "instance_count": 3, "def": "a cloth doll that is stuffed and (usually) painted", "synonyms": ["rag_doll"], "image_count": 1, "id": 869, "frequency": "r", "synset": "rag_doll.n.01"}, {"name": "raincoat", "instance_count": 303, "def": "a water-resistant coat", "synonyms": ["raincoat", "waterproof_jacket"], "image_count": 52, "id": 870, "frequency": "c", "synset": "raincoat.n.01"}, {"name": "ram_(animal)", "instance_count": 132, "def": "uncastrated adult male sheep", "synonyms": ["ram_(animal)"], "image_count": 36, "id": 871, "frequency": "c", "synset": "ram.n.05"}, {"name": "raspberry", "instance_count": 778, "def": "red or black edible aggregate berries usually smaller than the related blackberries", "synonyms": ["raspberry"], "image_count": 70, "id": 872, "frequency": "c", "synset": "raspberry.n.02"}, {"name": "rat", "instance_count": 6, "def": "any of various long-tailed rodents similar to but larger than a mouse", "synonyms": ["rat"], "image_count": 6, "id": 873, "frequency": "r", "synset": "rat.n.01"}, {"name": "razorblade", "instance_count": 35, "def": "a blade that has very sharp edge", "synonyms": ["razorblade"], "image_count": 29, "id": 874, "frequency": "c", "synset": "razorblade.n.01"}, {"name": "reamer_(juicer)", "instance_count": 26, "def": "a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit", "synonyms": ["reamer_(juicer)", "juicer", "juice_reamer"], "image_count": 24, "id": 875, "frequency": "c", "synset": "reamer.n.01"}, {"name": "rearview_mirror", "instance_count": 3650, "def": "vehicle mirror (side or rearview)", "synonyms": ["rearview_mirror"], "image_count": 1115, "id": 876, "frequency": "f", "synset": "rearview_mirror.n.01"}, {"name": "receipt", "instance_count": 89, "def": "an acknowledgment (usually tangible) that payment has been made", "synonyms": ["receipt"], "image_count": 61, "id": 877, "frequency": "c", "synset": "receipt.n.02"}, {"name": "recliner", "instance_count": 28, "def": "an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it", "synonyms": ["recliner", "reclining_chair", "lounger_(chair)"], "image_count": 18, "id": 878, "frequency": "c", "synset": "recliner.n.01"}, {"name": "record_player", "instance_count": 22, "def": "machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically", "synonyms": ["record_player", "phonograph_(record_player)", "turntable"], "image_count": 18, "id": 879, "frequency": "c", "synset": "record_player.n.01"}, {"name": "reflector", "instance_count": 3426, "def": "device that reflects light, radiation, etc.", "synonyms": ["reflector"], "image_count": 665, "id": 880, "frequency": "f", "synset": "reflector.n.01"}, {"name": "remote_control", "instance_count": 2467, "def": "a device that can be used to control a machine or apparatus from a distance", "synonyms": ["remote_control"], "image_count": 1096, "id": 881, "frequency": "f", "synset": "remote_control.n.01"}, {"name": "rhinoceros", "instance_count": 50, "def": "massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout", "synonyms": ["rhinoceros"], "image_count": 29, "id": 882, "frequency": "c", "synset": "rhinoceros.n.01"}, {"name": "rib_(food)", "instance_count": 32, "def": "cut of meat including one or more ribs", "synonyms": ["rib_(food)"], "image_count": 8, "id": 883, "frequency": "r", "synset": "rib.n.03"}, {"name": "rifle", "instance_count": 37, "def": "a shoulder firearm with a long barrel", "synonyms": ["rifle"], "image_count": 14, "id": 884, "frequency": "c", "synset": "rifle.n.01"}, {"name": "ring", "instance_count": 2314, "def": "jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger", "synonyms": ["ring"], "image_count": 1622, "id": 885, "frequency": "f", "synset": "ring.n.08"}, {"name": "river_boat", "instance_count": 3, "def": "a boat used on rivers or to ply a river", "synonyms": ["river_boat"], "image_count": 2, "id": 886, "frequency": "r", "synset": "river_boat.n.01"}, {"name": "road_map", "instance_count": 3, "def": "(NOT A ROAD) a MAP showing roads (for automobile travel)", "synonyms": ["road_map"], "image_count": 3, "id": 887, "frequency": "r", "synset": "road_map.n.02"}, {"name": "robe", "instance_count": 77, "def": "any loose flowing garment", "synonyms": ["robe"], "image_count": 32, "id": 888, "frequency": "c", "synset": "robe.n.01"}, {"name": "rocking_chair", "instance_count": 70, "def": "a chair mounted on rockers", "synonyms": ["rocking_chair"], "image_count": 55, "id": 889, "frequency": "c", "synset": "rocking_chair.n.01"}, {"name": "rodent", "instance_count": 2, "def": "relatively small placental mammals having a single pair of constantly growing incisor teeth specialized for gnawing", "synonyms": ["rodent"], "image_count": 1, "id": 890, "frequency": "r", "synset": "rodent.n.01"}, {"name": "roller_skate", "instance_count": 35, "def": "a shoe with pairs of rollers (small hard wheels) fixed to the sole", "synonyms": ["roller_skate"], "image_count": 10, "id": 891, "frequency": "r", "synset": "roller_skate.n.01"}, {"name": "Rollerblade", "instance_count": 31, "def": "an in-line variant of a roller skate", "synonyms": ["Rollerblade"], "image_count": 10, "id": 892, "frequency": "r", "synset": "rollerblade.n.01"}, {"name": "rolling_pin", "instance_count": 52, "def": "utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough", "synonyms": ["rolling_pin"], "image_count": 47, "id": 893, "frequency": "c", "synset": "rolling_pin.n.01"}, {"name": "root_beer", "instance_count": 3, "def": "carbonated drink containing extracts of roots and herbs", "synonyms": ["root_beer"], "image_count": 3, "id": 894, "frequency": "r", "synset": "root_beer.n.01"}, {"name": "router_(computer_equipment)", "instance_count": 41, "def": "a device that forwards data packets between computer networks", "synonyms": ["router_(computer_equipment)"], "image_count": 29, "id": 895, "frequency": "c", "synset": "router.n.02"}, {"name": "rubber_band", "instance_count": 574, "def": "a narrow band of elastic rubber used to hold things (such as papers) together", "synonyms": ["rubber_band", "elastic_band"], "image_count": 342, "id": 896, "frequency": "f", "synset": "rubber_band.n.01"}, {"name": "runner_(carpet)", "instance_count": 32, "def": "a long narrow carpet", "synonyms": ["runner_(carpet)"], "image_count": 25, "id": 897, "frequency": "c", "synset": "runner.n.08"}, {"name": "plastic_bag", "instance_count": 3631, "def": "a bag made of paper or plastic for holding customer's purchases", "synonyms": ["plastic_bag", "paper_bag"], "image_count": 1469, "id": 898, "frequency": "f", "synset": "sack.n.01"}, {"name": "saddle_(on_an_animal)", "instance_count": 955, "def": "a seat for the rider of a horse or camel", "synonyms": ["saddle_(on_an_animal)"], "image_count": 521, "id": 899, "frequency": "f", "synset": "saddle.n.01"}, {"name": "saddle_blanket", "instance_count": 648, "def": "stable gear consisting of a blanket placed under the saddle", "synonyms": ["saddle_blanket", "saddlecloth", "horse_blanket"], "image_count": 347, "id": 900, "frequency": "f", "synset": "saddle_blanket.n.01"}, {"name": "saddlebag", "instance_count": 56, "def": "a large bag (or pair of bags) hung over a saddle", "synonyms": ["saddlebag"], "image_count": 35, "id": 901, "frequency": "c", "synset": "saddlebag.n.01"}, {"name": "safety_pin", "instance_count": 15, "def": "a pin in the form of a clasp; has a guard so the point of the pin will not stick the user", "synonyms": ["safety_pin"], "image_count": 7, "id": 902, "frequency": "r", "synset": "safety_pin.n.01"}, {"name": "sail", "instance_count": 863, "def": "a large piece of fabric by means of which wind is used to propel a sailing vessel", "synonyms": ["sail"], "image_count": 207, "id": 903, "frequency": "f", "synset": "sail.n.01"}, {"name": "salad", "instance_count": 171, "def": "food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens", "synonyms": ["salad"], "image_count": 108, "id": 904, "frequency": "f", "synset": "salad.n.01"}, {"name": "salad_plate", "instance_count": 6, "def": "a plate or bowl for individual servings of salad", "synonyms": ["salad_plate", "salad_bowl"], "image_count": 2, "id": 905, "frequency": "r", "synset": "salad_plate.n.01"}, {"name": "salami", "instance_count": 290, "def": "highly seasoned fatty sausage of pork and beef usually dried", "synonyms": ["salami"], "image_count": 34, "id": 906, "frequency": "c", "synset": "salami.n.01"}, {"name": "salmon_(fish)", "instance_count": 27, "def": "any of various large food and game fishes of northern waters", "synonyms": ["salmon_(fish)"], "image_count": 12, "id": 907, "frequency": "c", "synset": "salmon.n.01"}, {"name": "salmon_(food)", "instance_count": 14, "def": "flesh of any of various marine or freshwater fish of the family Salmonidae", "synonyms": ["salmon_(food)"], "image_count": 10, "id": 908, "frequency": "r", "synset": "salmon.n.03"}, {"name": "salsa", "instance_count": 22, "def": "spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods", "synonyms": ["salsa"], "image_count": 13, "id": 909, "frequency": "c", "synset": "salsa.n.01"}, {"name": "saltshaker", "instance_count": 543, "def": "a shaker with a perforated top for sprinkling salt", "synonyms": ["saltshaker"], "image_count": 361, "id": 910, "frequency": "f", "synset": "saltshaker.n.01"}, {"name": "sandal_(type_of_shoe)", "instance_count": 3145, "def": "a shoe consisting of a sole fastened by straps to the foot", "synonyms": ["sandal_(type_of_shoe)"], "image_count": 1023, "id": 911, "frequency": "f", "synset": "sandal.n.01"}, {"name": "sandwich", "instance_count": 2315, "def": "two (or more) slices of bread with a filling between them", "synonyms": ["sandwich"], "image_count": 782, "id": 912, "frequency": "f", "synset": "sandwich.n.01"}, {"name": "satchel", "instance_count": 3, "def": "luggage consisting of a small case with a flat bottom and (usually) a shoulder strap", "synonyms": ["satchel"], "image_count": 2, "id": 913, "frequency": "r", "synset": "satchel.n.01"}, {"name": "saucepan", "instance_count": 26, "def": "a deep pan with a handle; used for stewing or boiling", "synonyms": ["saucepan"], "image_count": 5, "id": 914, "frequency": "r", "synset": "saucepan.n.01"}, {"name": "saucer", "instance_count": 555, "def": "a small shallow dish for holding a cup at the table", "synonyms": ["saucer"], "image_count": 247, "id": 915, "frequency": "f", "synset": "saucer.n.02"}, {"name": "sausage", "instance_count": 2704, "def": "highly seasoned minced meat stuffed in casings", "synonyms": ["sausage"], "image_count": 221, "id": 916, "frequency": "f", "synset": "sausage.n.01"}, {"name": "sawhorse", "instance_count": 5, "def": "a framework for holding wood that is being sawed", "synonyms": ["sawhorse", "sawbuck"], "image_count": 4, "id": 917, "frequency": "r", "synset": "sawhorse.n.01"}, {"name": "saxophone", "instance_count": 13, "def": "a wind instrument with a `J'-shaped form typically made of brass", "synonyms": ["saxophone"], "image_count": 8, "id": 918, "frequency": "r", "synset": "sax.n.02"}, {"name": "scale_(measuring_instrument)", "instance_count": 178, "def": "a measuring instrument for weighing; shows amount of mass", "synonyms": ["scale_(measuring_instrument)"], "image_count": 158, "id": 919, "frequency": "f", "synset": "scale.n.07"}, {"name": "scarecrow", "instance_count": 4, "def": "an effigy in the shape of a man to frighten birds away from seeds", "synonyms": ["scarecrow", "strawman"], "image_count": 3, "id": 920, "frequency": "r", "synset": "scarecrow.n.01"}, {"name": "scarf", "instance_count": 1310, "def": "a garment worn around the head or neck or shoulders for warmth or decoration", "synonyms": ["scarf"], "image_count": 752, "id": 921, "frequency": "f", "synset": "scarf.n.01"}, {"name": "school_bus", "instance_count": 142, "def": "a bus used to transport children to or from school", "synonyms": ["school_bus"], "image_count": 64, "id": 922, "frequency": "c", "synset": "school_bus.n.01"}, {"name": "scissors", "instance_count": 1376, "def": "a tool having two crossed pivoting blades with looped handles", "synonyms": ["scissors"], "image_count": 707, "id": 923, "frequency": "f", "synset": "scissors.n.01"}, {"name": "scoreboard", "instance_count": 161, "def": "a large board for displaying the score of a contest (and some other information)", "synonyms": ["scoreboard"], "image_count": 143, "id": 924, "frequency": "f", "synset": "scoreboard.n.01"}, {"name": "scraper", "instance_count": 1, "def": "any of various hand tools for scraping", "synonyms": ["scraper"], "image_count": 1, "id": 925, "frequency": "r", "synset": "scraper.n.01"}, {"name": "screwdriver", "instance_count": 88, "def": "a hand tool for driving screws; has a tip that fits into the head of a screw", "synonyms": ["screwdriver"], "image_count": 49, "id": 926, "frequency": "c", "synset": "screwdriver.n.01"}, {"name": "scrubbing_brush", "instance_count": 141, "def": "a brush with short stiff bristles for heavy cleaning", "synonyms": ["scrubbing_brush"], "image_count": 126, "id": 927, "frequency": "f", "synset": "scrub_brush.n.01"}, {"name": "sculpture", "instance_count": 202, "def": "a three-dimensional work of art", "synonyms": ["sculpture"], "image_count": 76, "id": 928, "frequency": "c", "synset": "sculpture.n.01"}, {"name": "seabird", "instance_count": 126, "def": "a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.", "synonyms": ["seabird", "seafowl"], "image_count": 11, "id": 929, "frequency": "c", "synset": "seabird.n.01"}, {"name": "seahorse", "instance_count": 23, "def": "small fish with horse-like heads bent sharply downward and curled tails", "synonyms": ["seahorse"], "image_count": 11, "id": 930, "frequency": "c", "synset": "seahorse.n.02"}, {"name": "seaplane", "instance_count": 4, "def": "an airplane that can land on or take off from water", "synonyms": ["seaplane", "hydroplane"], "image_count": 4, "id": 931, "frequency": "r", "synset": "seaplane.n.01"}, {"name": "seashell", "instance_count": 451, "def": "the shell of a marine organism", "synonyms": ["seashell"], "image_count": 39, "id": 932, "frequency": "c", "synset": "seashell.n.01"}, {"name": "sewing_machine", "instance_count": 11, "def": "a textile machine used as a home appliance for sewing", "synonyms": ["sewing_machine"], "image_count": 11, "id": 933, "frequency": "c", "synset": "sewing_machine.n.01"}, {"name": "shaker", "instance_count": 24, "def": "a container in which something can be shaken", "synonyms": ["shaker"], "image_count": 13, "id": 934, "frequency": "c", "synset": "shaker.n.03"}, {"name": "shampoo", "instance_count": 254, "def": "cleansing agent consisting of soaps or detergents used for washing the hair", "synonyms": ["shampoo"], "image_count": 91, "id": 935, "frequency": "c", "synset": "shampoo.n.01"}, {"name": "shark", "instance_count": 20, "def": "typically large carnivorous fishes with sharpe teeth", "synonyms": ["shark"], "image_count": 14, "id": 936, "frequency": "c", "synset": "shark.n.01"}, {"name": "sharpener", "instance_count": 7, "def": "any implement that is used to make something (an edge or a point) sharper", "synonyms": ["sharpener"], "image_count": 5, "id": 937, "frequency": "r", "synset": "sharpener.n.01"}, {"name": "Sharpie", "instance_count": 5, "def": "a pen with indelible ink that will write on any surface", "synonyms": ["Sharpie"], "image_count": 3, "id": 938, "frequency": "r", "synset": "sharpie.n.03"}, {"name": "shaver_(electric)", "instance_count": 12, "def": "a razor powered by an electric motor", "synonyms": ["shaver_(electric)", "electric_shaver", "electric_razor"], "image_count": 10, "id": 939, "frequency": "r", "synset": "shaver.n.03"}, {"name": "shaving_cream", "instance_count": 33, "def": "toiletry consisting that forms a rich lather for softening the beard before shaving", "synonyms": ["shaving_cream", "shaving_soap"], "image_count": 18, "id": 940, "frequency": "c", "synset": "shaving_cream.n.01"}, {"name": "shawl", "instance_count": 9, "def": "cloak consisting of an oblong piece of cloth used to cover the head and shoulders", "synonyms": ["shawl"], "image_count": 9, "id": 941, "frequency": "r", "synset": "shawl.n.01"}, {"name": "shears", "instance_count": 38, "def": "large scissors with strong blades", "synonyms": ["shears"], "image_count": 6, "id": 942, "frequency": "r", "synset": "shears.n.01"}, {"name": "sheep", "instance_count": 13304, "def": "woolly usually horned ruminant mammal related to the goat", "synonyms": ["sheep"], "image_count": 951, "id": 943, "frequency": "f", "synset": "sheep.n.01"}, {"name": "shepherd_dog", "instance_count": 2, "def": "any of various usually long-haired breeds of dog reared to herd and guard sheep", "synonyms": ["shepherd_dog", "sheepdog"], "image_count": 2, "id": 944, "frequency": "r", "synset": "shepherd_dog.n.01"}, {"name": "sherbert", "instance_count": 2, "def": "a frozen dessert made primarily of fruit juice and sugar", "synonyms": ["sherbert", "sherbet"], "image_count": 1, "id": 945, "frequency": "r", "synset": "sherbert.n.01"}, {"name": "shield", "instance_count": 41, "def": "armor carried on the arm to intercept blows", "synonyms": ["shield"], "image_count": 19, "id": 946, "frequency": "c", "synset": "shield.n.02"}, {"name": "shirt", "instance_count": 10177, "def": "a garment worn on the upper half of the body", "synonyms": ["shirt"], "image_count": 1942, "id": 947, "frequency": "f", "synset": "shirt.n.01"}, {"name": "shoe", "instance_count": 9374, "def": "common footwear covering the foot", "synonyms": ["shoe", "sneaker_(type_of_shoe)", "tennis_shoe"], "image_count": 1916, "id": 948, "frequency": "f", "synset": "shoe.n.01"}, {"name": "shopping_bag", "instance_count": 377, "def": "a bag made of plastic or strong paper (often with handles); used to transport goods after shopping", "synonyms": ["shopping_bag"], "image_count": 139, "id": 949, "frequency": "f", "synset": "shopping_bag.n.01"}, {"name": "shopping_cart", "instance_count": 90, "def": "a handcart that holds groceries or other goods while shopping", "synonyms": ["shopping_cart"], "image_count": 43, "id": 950, "frequency": "c", "synset": "shopping_cart.n.01"}, {"name": "short_pants", "instance_count": 5305, "def": "trousers that end at or above the knee", "synonyms": ["short_pants", "shorts_(clothing)", "trunks_(clothing)"], "image_count": 1969, "id": 951, "frequency": "f", "synset": "short_pants.n.01"}, {"name": "shot_glass", "instance_count": 24, "def": "a small glass adequate to hold a single swallow of whiskey", "synonyms": ["shot_glass"], "image_count": 5, "id": 952, "frequency": "r", "synset": "shot_glass.n.01"}, {"name": "shoulder_bag", "instance_count": 331, "def": "a large handbag that can be carried by a strap looped over the shoulder", "synonyms": ["shoulder_bag"], "image_count": 134, "id": 953, "frequency": "f", "synset": "shoulder_bag.n.01"}, {"name": "shovel", "instance_count": 110, "def": "a hand tool for lifting loose material such as snow, dirt, etc.", "synonyms": ["shovel"], "image_count": 74, "id": 954, "frequency": "c", "synset": "shovel.n.01"}, {"name": "shower_head", "instance_count": 450, "def": "a plumbing fixture that sprays water over you", "synonyms": ["shower_head"], "image_count": 381, "id": 955, "frequency": "f", "synset": "shower.n.01"}, {"name": "shower_cap", "instance_count": 1, "def": "a tight cap worn to keep hair dry while showering", "synonyms": ["shower_cap"], "image_count": 1, "id": 956, "frequency": "r", "synset": "shower_cap.n.01"}, {"name": "shower_curtain", "instance_count": 479, "def": "a curtain that keeps water from splashing out of the shower area", "synonyms": ["shower_curtain"], "image_count": 381, "id": 957, "frequency": "f", "synset": "shower_curtain.n.01"}, {"name": "shredder_(for_paper)", "instance_count": 6, "def": "a device that shreds documents", "synonyms": ["shredder_(for_paper)"], "image_count": 6, "id": 958, "frequency": "r", "synset": "shredder.n.01"}, {"name": "signboard", "instance_count": 8091, "def": "structure displaying a board on which advertisements can be posted", "synonyms": ["signboard"], "image_count": 1826, "id": 959, "frequency": "f", "synset": "signboard.n.01"}, {"name": "silo", "instance_count": 95, "def": "a cylindrical tower used for storing goods", "synonyms": ["silo"], "image_count": 28, "id": 960, "frequency": "c", "synset": "silo.n.01"}, {"name": "sink", "instance_count": 2182, "def": "plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe", "synonyms": ["sink"], "image_count": 1635, "id": 961, "frequency": "f", "synset": "sink.n.01"}, {"name": "skateboard", "instance_count": 3597, "def": "a board with wheels that is ridden in a standing or crouching position and propelled by foot", "synonyms": ["skateboard"], "image_count": 1967, "id": 962, "frequency": "f", "synset": "skateboard.n.01"}, {"name": "skewer", "instance_count": 81, "def": "a long pin for holding meat in position while it is being roasted", "synonyms": ["skewer"], "image_count": 16, "id": 963, "frequency": "c", "synset": "skewer.n.01"}, {"name": "ski", "instance_count": 8496, "def": "sports equipment for skiing on snow", "synonyms": ["ski"], "image_count": 1926, "id": 964, "frequency": "f", "synset": "ski.n.01"}, {"name": "ski_boot", "instance_count": 8124, "def": "a stiff boot that is fastened to a ski with a ski binding", "synonyms": ["ski_boot"], "image_count": 1789, "id": 965, "frequency": "f", "synset": "ski_boot.n.01"}, {"name": "ski_parka", "instance_count": 1727, "def": "a parka to be worn while skiing", "synonyms": ["ski_parka", "ski_jacket"], "image_count": 401, "id": 966, "frequency": "f", "synset": "ski_parka.n.01"}, {"name": "ski_pole", "instance_count": 8263, "def": "a pole with metal points used as an aid in skiing", "synonyms": ["ski_pole"], "image_count": 1968, "id": 967, "frequency": "f", "synset": "ski_pole.n.01"}, {"name": "skirt", "instance_count": 1784, "def": "a garment hanging from the waist; worn mainly by girls and women", "synonyms": ["skirt"], "image_count": 1167, "id": 968, "frequency": "f", "synset": "skirt.n.02"}, {"name": "skullcap", "instance_count": 1, "def": "rounded brimless cap fitting the crown of the head", "synonyms": ["skullcap"], "image_count": 1, "id": 969, "frequency": "r", "synset": "skullcap.n.01"}, {"name": "sled", "instance_count": 102, "def": "a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.", "synonyms": ["sled", "sledge", "sleigh"], "image_count": 56, "id": 970, "frequency": "c", "synset": "sled.n.01"}, {"name": "sleeping_bag", "instance_count": 33, "def": "large padded bag designed to be slept in outdoors", "synonyms": ["sleeping_bag"], "image_count": 17, "id": 971, "frequency": "c", "synset": "sleeping_bag.n.01"}, {"name": "sling_(bandage)", "instance_count": 1, "def": "bandage to support an injured forearm; slung over the shoulder or neck", "synonyms": ["sling_(bandage)", "triangular_bandage"], "image_count": 1, "id": 972, "frequency": "r", "synset": "sling.n.05"}, {"name": "slipper_(footwear)", "instance_count": 121, "def": "low footwear that can be slipped on and off easily; usually worn indoors", "synonyms": ["slipper_(footwear)", "carpet_slipper_(footwear)"], "image_count": 58, "id": 973, "frequency": "c", "synset": "slipper.n.01"}, {"name": "smoothie", "instance_count": 53, "def": "a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk", "synonyms": ["smoothie"], "image_count": 9, "id": 974, "frequency": "r", "synset": "smoothie.n.02"}, {"name": "snake", "instance_count": 16, "def": "limbless scaly elongate reptile; some are venomous", "synonyms": ["snake", "serpent"], "image_count": 8, "id": 975, "frequency": "r", "synset": "snake.n.01"}, {"name": "snowboard", "instance_count": 2119, "def": "a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes", "synonyms": ["snowboard"], "image_count": 1124, "id": 976, "frequency": "f", "synset": "snowboard.n.01"}, {"name": "snowman", "instance_count": 61, "def": "a figure of a person made of packed snow", "synonyms": ["snowman"], "image_count": 31, "id": 977, "frequency": "c", "synset": "snowman.n.01"}, {"name": "snowmobile", "instance_count": 23, "def": "tracked vehicle for travel on snow having skis in front", "synonyms": ["snowmobile"], "image_count": 16, "id": 978, "frequency": "c", "synset": "snowmobile.n.01"}, {"name": "soap", "instance_count": 895, "def": "a cleansing agent made from the salts of vegetable or animal fats", "synonyms": ["soap"], "image_count": 491, "id": 979, "frequency": "f", "synset": "soap.n.01"}, {"name": "soccer_ball", "instance_count": 670, "def": "an inflated ball used in playing soccer (called `football' outside of the United States)", "synonyms": ["soccer_ball"], "image_count": 432, "id": 980, "frequency": "f", "synset": "soccer_ball.n.01"}, {"name": "sock", "instance_count": 6866, "def": "cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee", "synonyms": ["sock"], "image_count": 1945, "id": 981, "frequency": "f", "synset": "sock.n.01"}, {"name": "sofa", "instance_count": 2408, "def": "an upholstered seat for more than one person", "synonyms": ["sofa", "couch", "lounge"], "image_count": 1899, "id": 982, "frequency": "f", "synset": "sofa.n.01"}, {"name": "softball", "instance_count": 5, "def": "ball used in playing softball", "synonyms": ["softball"], "image_count": 5, "id": 983, "frequency": "r", "synset": "softball.n.01"}, {"name": "solar_array", "instance_count": 52, "def": "electrical device consisting of a large array of connected solar cells", "synonyms": ["solar_array", "solar_battery", "solar_panel"], "image_count": 28, "id": 984, "frequency": "c", "synset": "solar_array.n.01"}, {"name": "sombrero", "instance_count": 22, "def": "a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico", "synonyms": ["sombrero"], "image_count": 7, "id": 985, "frequency": "r", "synset": "sombrero.n.02"}, {"name": "soup", "instance_count": 193, "def": "liquid food especially of meat or fish or vegetable stock often containing pieces of solid food", "synonyms": ["soup"], "image_count": 146, "id": 986, "frequency": "f", "synset": "soup.n.01"}, {"name": "soup_bowl", "instance_count": 2, "def": "a bowl for serving soup", "synonyms": ["soup_bowl"], "image_count": 1, "id": 987, "frequency": "r", "synset": "soup_bowl.n.01"}, {"name": "soupspoon", "instance_count": 44, "def": "a spoon with a rounded bowl for eating soup", "synonyms": ["soupspoon"], "image_count": 25, "id": 988, "frequency": "c", "synset": "soupspoon.n.01"}, {"name": "sour_cream", "instance_count": 49, "def": "soured light cream", "synonyms": ["sour_cream", "soured_cream"], "image_count": 22, "id": 989, "frequency": "c", "synset": "sour_cream.n.01"}, {"name": "soya_milk", "instance_count": 2, "def": "a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu", "synonyms": ["soya_milk", "soybean_milk", "soymilk"], "image_count": 1, "id": 990, "frequency": "r", "synset": "soya_milk.n.01"}, {"name": "space_shuttle", "instance_count": 10, "def": "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", "synonyms": ["space_shuttle"], "image_count": 10, "id": 991, "frequency": "r", "synset": "space_shuttle.n.01"}, {"name": "sparkler_(fireworks)", "instance_count": 12, "def": "a firework that burns slowly and throws out a shower of sparks", "synonyms": ["sparkler_(fireworks)"], "image_count": 9, "id": 992, "frequency": "r", "synset": "sparkler.n.02"}, {"name": "spatula", "instance_count": 508, "def": "a hand tool with a thin flexible blade used to mix or spread soft substances", "synonyms": ["spatula"], "image_count": 308, "id": 993, "frequency": "f", "synset": "spatula.n.02"}, {"name": "spear", "instance_count": 9, "def": "a long pointed rod used as a tool or weapon", "synonyms": ["spear", "lance"], "image_count": 4, "id": 994, "frequency": "r", "synset": "spear.n.01"}, {"name": "spectacles", "instance_count": 3040, "def": "optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision", "synonyms": ["spectacles", "specs", "eyeglasses", "glasses"], "image_count": 1969, "id": 995, "frequency": "f", "synset": "spectacles.n.01"}, {"name": "spice_rack", "instance_count": 54, "def": "a rack for displaying containers filled with spices", "synonyms": ["spice_rack"], "image_count": 45, "id": 996, "frequency": "c", "synset": "spice_rack.n.01"}, {"name": "spider", "instance_count": 19, "def": "predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body", "synonyms": ["spider"], "image_count": 12, "id": 997, "frequency": "c", "synset": "spider.n.01"}, {"name": "crawfish", "instance_count": 5, "def": "large edible marine crustacean having a spiny carapace but lacking the large pincers of true lobsters", "synonyms": ["crawfish", "crayfish"], "image_count": 1, "id": 998, "frequency": "r", "synset": "spiny_lobster.n.02"}, {"name": "sponge", "instance_count": 116, "def": "a porous mass usable to absorb water typically used for cleaning", "synonyms": ["sponge"], "image_count": 85, "id": 999, "frequency": "c", "synset": "sponge.n.01"}, {"name": "spoon", "instance_count": 2111, "def": "a piece of cutlery with a shallow bowl-shaped container and a handle", "synonyms": ["spoon"], "image_count": 1127, "id": 1000, "frequency": "f", "synset": "spoon.n.01"}, {"name": "sportswear", "instance_count": 85, "def": "attire worn for sport or for casual wear", "synonyms": ["sportswear", "athletic_wear", "activewear"], "image_count": 11, "id": 1001, "frequency": "c", "synset": "sportswear.n.01"}, {"name": "spotlight", "instance_count": 403, "def": "a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer", "synonyms": ["spotlight"], "image_count": 60, "id": 1002, "frequency": "c", "synset": "spotlight.n.02"}, {"name": "squid_(food)", "instance_count": 6, "def": "(Italian cuisine) squid prepared as food", "synonyms": ["squid_(food)", "calamari", "calamary"], "image_count": 1, "id": 1003, "frequency": "r", "synset": "squid.n.01"}, {"name": "squirrel", "instance_count": 19, "def": "a kind of arboreal rodent having a long bushy tail", "synonyms": ["squirrel"], "image_count": 16, "id": 1004, "frequency": "c", "synset": "squirrel.n.01"}, {"name": "stagecoach", "instance_count": 1, "def": "a large coach-and-four formerly used to carry passengers and mail on regular routes between towns", "synonyms": ["stagecoach"], "image_count": 1, "id": 1005, "frequency": "r", "synset": "stagecoach.n.01"}, {"name": "stapler_(stapling_machine)", "instance_count": 68, "def": "a machine that inserts staples into sheets of paper in order to fasten them together", "synonyms": ["stapler_(stapling_machine)"], "image_count": 65, "id": 1006, "frequency": "c", "synset": "stapler.n.01"}, {"name": "starfish", "instance_count": 28, "def": "echinoderms characterized by five arms extending from a central disk", "synonyms": ["starfish", "sea_star"], "image_count": 13, "id": 1007, "frequency": "c", "synset": "starfish.n.01"}, {"name": "statue_(sculpture)", "instance_count": 1934, "def": "a sculpture representing a human or animal", "synonyms": ["statue_(sculpture)"], "image_count": 655, "id": 1008, "frequency": "f", "synset": "statue.n.01"}, {"name": "steak_(food)", "instance_count": 139, "def": "a slice of meat cut from the fleshy part of an animal or large fish", "synonyms": ["steak_(food)"], "image_count": 51, "id": 1009, "frequency": "c", "synset": "steak.n.01"}, {"name": "steak_knife", "instance_count": 1, "def": "a sharp table knife used in eating steak", "synonyms": ["steak_knife"], "image_count": 1, "id": 1010, "frequency": "r", "synset": "steak_knife.n.01"}, {"name": "steering_wheel", "instance_count": 901, "def": "a handwheel that is used for steering", "synonyms": ["steering_wheel"], "image_count": 673, "id": 1011, "frequency": "f", "synset": "steering_wheel.n.01"}, {"name": "stepladder", "instance_count": 5, "def": "a folding portable ladder hinged at the top", "synonyms": ["stepladder"], "image_count": 5, "id": 1012, "frequency": "r", "synset": "step_ladder.n.01"}, {"name": "step_stool", "instance_count": 43, "def": "a stool that has one or two steps that fold under the seat", "synonyms": ["step_stool"], "image_count": 36, "id": 1013, "frequency": "c", "synset": "step_stool.n.01"}, {"name": "stereo_(sound_system)", "instance_count": 77, "def": "electronic device for playing audio", "synonyms": ["stereo_(sound_system)"], "image_count": 54, "id": 1014, "frequency": "c", "synset": "stereo.n.01"}, {"name": "stew", "instance_count": 7, "def": "food prepared by stewing especially meat or fish with vegetables", "synonyms": ["stew"], "image_count": 5, "id": 1015, "frequency": "r", "synset": "stew.n.02"}, {"name": "stirrer", "instance_count": 18, "def": "an implement used for stirring", "synonyms": ["stirrer"], "image_count": 8, "id": 1016, "frequency": "r", "synset": "stirrer.n.02"}, {"name": "stirrup", "instance_count": 625, "def": "support consisting of metal loops into which rider's feet go", "synonyms": ["stirrup"], "image_count": 305, "id": 1017, "frequency": "f", "synset": "stirrup.n.01"}, {"name": "stool", "instance_count": 583, "def": "a simple seat without a back or arms", "synonyms": ["stool"], "image_count": 297, "id": 1018, "frequency": "f", "synset": "stool.n.01"}, {"name": "stop_sign", "instance_count": 1349, "def": "a traffic sign to notify drivers that they must come to a complete stop", "synonyms": ["stop_sign"], "image_count": 1053, "id": 1019, "frequency": "f", "synset": "stop_sign.n.01"}, {"name": "brake_light", "instance_count": 1334, "def": "a red light on the rear of a motor vehicle that signals when the brakes are applied", "synonyms": ["brake_light"], "image_count": 223, "id": 1020, "frequency": "f", "synset": "stoplight.n.01"}, {"name": "stove", "instance_count": 1133, "def": "a kitchen appliance used for cooking food", "synonyms": ["stove", "kitchen_stove", "range_(kitchen_appliance)", "kitchen_range", "cooking_stove"], "image_count": 1037, "id": 1021, "frequency": "f", "synset": "stove.n.01"}, {"name": "strainer", "instance_count": 99, "def": "a filter to retain larger pieces while smaller pieces and liquids pass through", "synonyms": ["strainer"], "image_count": 63, "id": 1022, "frequency": "c", "synset": "strainer.n.01"}, {"name": "strap", "instance_count": 7435, "def": "an elongated strip of material for binding things together or holding", "synonyms": ["strap"], "image_count": 1881, "id": 1023, "frequency": "f", "synset": "strap.n.01"}, {"name": "straw_(for_drinking)", "instance_count": 1154, "def": "a thin paper or plastic tube used to suck liquids into the mouth", "synonyms": ["straw_(for_drinking)", "drinking_straw"], "image_count": 507, "id": 1024, "frequency": "f", "synset": "straw.n.04"}, {"name": "strawberry", "instance_count": 4386, "def": "sweet fleshy red fruit", "synonyms": ["strawberry"], "image_count": 333, "id": 1025, "frequency": "f", "synset": "strawberry.n.01"}, {"name": "street_sign", "instance_count": 8350, "def": "a sign visible from the street", "synonyms": ["street_sign"], "image_count": 1911, "id": 1026, "frequency": "f", "synset": "street_sign.n.01"}, {"name": "streetlight", "instance_count": 7381, "def": "a lamp supported on a lamppost; for illuminating a street", "synonyms": ["streetlight", "street_lamp"], "image_count": 1765, "id": 1027, "frequency": "f", "synset": "streetlight.n.01"}, {"name": "string_cheese", "instance_count": 1, "def": "cheese formed in long strings twisted together", "synonyms": ["string_cheese"], "image_count": 1, "id": 1028, "frequency": "r", "synset": "string_cheese.n.01"}, {"name": "stylus", "instance_count": 11, "def": "a pointed tool for writing or drawing or engraving, including pens", "synonyms": ["stylus"], "image_count": 5, "id": 1029, "frequency": "r", "synset": "stylus.n.02"}, {"name": "subwoofer", "instance_count": 1, "def": "a loudspeaker that is designed to reproduce very low bass frequencies", "synonyms": ["subwoofer"], "image_count": 1, "id": 1030, "frequency": "r", "synset": "subwoofer.n.01"}, {"name": "sugar_bowl", "instance_count": 10, "def": "a dish in which sugar is served", "synonyms": ["sugar_bowl"], "image_count": 9, "id": 1031, "frequency": "r", "synset": "sugar_bowl.n.01"}, {"name": "sugarcane_(plant)", "instance_count": 31, "def": "juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice", "synonyms": ["sugarcane_(plant)"], "image_count": 2, "id": 1032, "frequency": "r", "synset": "sugarcane.n.01"}, {"name": "suit_(clothing)", "instance_count": 461, "def": "a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color", "synonyms": ["suit_(clothing)"], "image_count": 151, "id": 1033, "frequency": "f", "synset": "suit.n.01"}, {"name": "sunflower", "instance_count": 618, "def": "any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays", "synonyms": ["sunflower"], "image_count": 82, "id": 1034, "frequency": "c", "synset": "sunflower.n.01"}, {"name": "sunglasses", "instance_count": 5603, "def": "spectacles that are darkened or polarized to protect the eyes from the glare of the sun", "synonyms": ["sunglasses"], "image_count": 1931, "id": 1035, "frequency": "f", "synset": "sunglasses.n.01"}, {"name": "sunhat", "instance_count": 170, "def": "a hat with a broad brim that protects the face from direct exposure to the sun", "synonyms": ["sunhat"], "image_count": 41, "id": 1036, "frequency": "c", "synset": "sunhat.n.01"}, {"name": "surfboard", "instance_count": 3835, "def": "a narrow buoyant board for riding surf", "synonyms": ["surfboard"], "image_count": 1895, "id": 1037, "frequency": "f", "synset": "surfboard.n.01"}, {"name": "sushi", "instance_count": 337, "def": "rice (with raw fish) wrapped in seaweed", "synonyms": ["sushi"], "image_count": 24, "id": 1038, "frequency": "c", "synset": "sushi.n.01"}, {"name": "mop", "instance_count": 22, "def": "cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors", "synonyms": ["mop"], "image_count": 22, "id": 1039, "frequency": "c", "synset": "swab.n.02"}, {"name": "sweat_pants", "instance_count": 56, "def": "loose-fitting trousers with elastic cuffs; worn by athletes", "synonyms": ["sweat_pants"], "image_count": 35, "id": 1040, "frequency": "c", "synset": "sweat_pants.n.01"}, {"name": "sweatband", "instance_count": 145, "def": "a band of material tied around the forehead or wrist to absorb sweat", "synonyms": ["sweatband"], "image_count": 69, "id": 1041, "frequency": "c", "synset": "sweatband.n.02"}, {"name": "sweater", "instance_count": 1894, "def": "a crocheted or knitted garment covering the upper part of the body", "synonyms": ["sweater"], "image_count": 962, "id": 1042, "frequency": "f", "synset": "sweater.n.01"}, {"name": "sweatshirt", "instance_count": 1482, "def": "cotton knit pullover with long sleeves worn during athletic activity", "synonyms": ["sweatshirt"], "image_count": 588, "id": 1043, "frequency": "f", "synset": "sweatshirt.n.01"}, {"name": "sweet_potato", "instance_count": 137, "def": "the edible tuberous root of the sweet potato vine", "synonyms": ["sweet_potato"], "image_count": 21, "id": 1044, "frequency": "c", "synset": "sweet_potato.n.02"}, {"name": "swimsuit", "instance_count": 3141, "def": "garment worn for swimming", "synonyms": ["swimsuit", "swimwear", "bathing_suit", "swimming_costume", "bathing_costume", "swimming_trunks", "bathing_trunks"], "image_count": 825, "id": 1045, "frequency": "f", "synset": "swimsuit.n.01"}, {"name": "sword", "instance_count": 72, "def": "a cutting or thrusting weapon that has a long metal blade", "synonyms": ["sword"], "image_count": 52, "id": 1046, "frequency": "c", "synset": "sword.n.01"}, {"name": "syringe", "instance_count": 14, "def": "a medical instrument used to inject or withdraw fluids", "synonyms": ["syringe"], "image_count": 5, "id": 1047, "frequency": "r", "synset": "syringe.n.01"}, {"name": "Tabasco_sauce", "instance_count": 5, "def": "very spicy sauce (trade name Tabasco) made from fully-aged red peppers", "synonyms": ["Tabasco_sauce"], "image_count": 5, "id": 1048, "frequency": "r", "synset": "tabasco.n.02"}, {"name": "table-tennis_table", "instance_count": 5, "def": "a table used for playing table tennis", "synonyms": ["table-tennis_table", "ping-pong_table"], "image_count": 5, "id": 1049, "frequency": "r", "synset": "table-tennis_table.n.01"}, {"name": "table", "instance_count": 2804, "def": "a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs", "synonyms": ["table"], "image_count": 1860, "id": 1050, "frequency": "f", "synset": "table.n.02"}, {"name": "table_lamp", "instance_count": 81, "def": "a lamp that sits on a table", "synonyms": ["table_lamp"], "image_count": 56, "id": 1051, "frequency": "c", "synset": "table_lamp.n.01"}, {"name": "tablecloth", "instance_count": 2496, "def": "a covering spread over a dining table", "synonyms": ["tablecloth"], "image_count": 1582, "id": 1052, "frequency": "f", "synset": "tablecloth.n.01"}, {"name": "tachometer", "instance_count": 10, "def": "measuring instrument for indicating speed of rotation", "synonyms": ["tachometer"], "image_count": 7, "id": 1053, "frequency": "r", "synset": "tachometer.n.01"}, {"name": "taco", "instance_count": 21, "def": "a small tortilla cupped around a filling", "synonyms": ["taco"], "image_count": 2, "id": 1054, "frequency": "r", "synset": "taco.n.02"}, {"name": "tag", "instance_count": 7550, "def": "a label associated with something for the purpose of identification or information", "synonyms": ["tag"], "image_count": 1562, "id": 1055, "frequency": "f", "synset": "tag.n.02"}, {"name": "taillight", "instance_count": 9222, "def": "lamp (usually red) mounted at the rear of a motor vehicle", "synonyms": ["taillight", "rear_light"], "image_count": 1885, "id": 1056, "frequency": "f", "synset": "taillight.n.01"}, {"name": "tambourine", "instance_count": 1, "def": "a shallow drum with a single drumhead and with metallic disks in the sides", "synonyms": ["tambourine"], "image_count": 1, "id": 1057, "frequency": "r", "synset": "tambourine.n.01"}, {"name": "army_tank", "instance_count": 7, "def": "an enclosed armored military vehicle; has a cannon and moves on caterpillar treads", "synonyms": ["army_tank", "armored_combat_vehicle", "armoured_combat_vehicle"], "image_count": 5, "id": 1058, "frequency": "r", "synset": "tank.n.01"}, {"name": "tank_(storage_vessel)", "instance_count": 304, "def": "a large (usually metallic) vessel for holding gases or liquids", "synonyms": ["tank_(storage_vessel)", "storage_tank"], "image_count": 137, "id": 1059, "frequency": "f", "synset": "tank.n.02"}, {"name": "tank_top_(clothing)", "instance_count": 1799, "def": "a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening", "synonyms": ["tank_top_(clothing)"], "image_count": 1094, "id": 1060, "frequency": "f", "synset": "tank_top.n.01"}, {"name": "tape_(sticky_cloth_or_paper)", "instance_count": 560, "def": "a long thin piece of cloth or paper as used for binding or fastening", "synonyms": ["tape_(sticky_cloth_or_paper)"], "image_count": 134, "id": 1061, "frequency": "f", "synset": "tape.n.01"}, {"name": "tape_measure", "instance_count": 35, "def": "measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths", "synonyms": ["tape_measure", "measuring_tape"], "image_count": 29, "id": 1062, "frequency": "c", "synset": "tape.n.04"}, {"name": "tapestry", "instance_count": 29, "def": "a heavy textile with a woven design; used for curtains and upholstery", "synonyms": ["tapestry"], "image_count": 22, "id": 1063, "frequency": "c", "synset": "tapestry.n.02"}, {"name": "tarp", "instance_count": 1315, "def": "waterproofed canvas", "synonyms": ["tarp"], "image_count": 522, "id": 1064, "frequency": "f", "synset": "tarpaulin.n.01"}, {"name": "tartan", "instance_count": 68, "def": "a cloth having a crisscross design", "synonyms": ["tartan", "plaid"], "image_count": 50, "id": 1065, "frequency": "c", "synset": "tartan.n.01"}, {"name": "tassel", "instance_count": 276, "def": "adornment consisting of a bunch of cords fastened at one end", "synonyms": ["tassel"], "image_count": 68, "id": 1066, "frequency": "c", "synset": "tassel.n.01"}, {"name": "tea_bag", "instance_count": 42, "def": "a measured amount of tea in a bag for an individual serving of tea", "synonyms": ["tea_bag"], "image_count": 16, "id": 1067, "frequency": "c", "synset": "tea_bag.n.01"}, {"name": "teacup", "instance_count": 152, "def": "a cup from which tea is drunk", "synonyms": ["teacup"], "image_count": 40, "id": 1068, "frequency": "c", "synset": "teacup.n.02"}, {"name": "teakettle", "instance_count": 40, "def": "kettle for boiling water to make tea", "synonyms": ["teakettle"], "image_count": 35, "id": 1069, "frequency": "c", "synset": "teakettle.n.01"}, {"name": "teapot", "instance_count": 209, "def": "pot for brewing tea; usually has a spout and handle", "synonyms": ["teapot"], "image_count": 135, "id": 1070, "frequency": "f", "synset": "teapot.n.01"}, {"name": "teddy_bear", "instance_count": 4886, "def": "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", "synonyms": ["teddy_bear"], "image_count": 1413, "id": 1071, "frequency": "f", "synset": "teddy.n.01"}, {"name": "telephone", "instance_count": 945, "def": "electronic device for communicating by voice over long distances (includes wired and wireless/cell phones)", "synonyms": ["telephone", "phone", "telephone_set"], "image_count": 772, "id": 1072, "frequency": "f", "synset": "telephone.n.01"}, {"name": "telephone_booth", "instance_count": 62, "def": "booth for using a telephone", "synonyms": ["telephone_booth", "phone_booth", "call_box", "telephone_box", "telephone_kiosk"], "image_count": 50, "id": 1073, "frequency": "c", "synset": "telephone_booth.n.01"}, {"name": "telephone_pole", "instance_count": 3725, "def": "tall pole supporting telephone wires", "synonyms": ["telephone_pole", "telegraph_pole", "telegraph_post"], "image_count": 1015, "id": 1074, "frequency": "f", "synset": "telephone_pole.n.01"}, {"name": "telephoto_lens", "instance_count": 1, "def": "a camera lens that magnifies the image", "synonyms": ["telephoto_lens", "zoom_lens"], "image_count": 1, "id": 1075, "frequency": "r", "synset": "telephoto_lens.n.01"}, {"name": "television_camera", "instance_count": 117, "def": "television equipment for capturing and recording video", "synonyms": ["television_camera", "tv_camera"], "image_count": 65, "id": 1076, "frequency": "c", "synset": "television_camera.n.01"}, {"name": "television_set", "instance_count": 2205, "def": "an electronic device that receives television signals and displays them on a screen", "synonyms": ["television_set", "tv", "tv_set"], "image_count": 1900, "id": 1077, "frequency": "f", "synset": "television_receiver.n.01"}, {"name": "tennis_ball", "instance_count": 2835, "def": "ball about the size of a fist used in playing tennis", "synonyms": ["tennis_ball"], "image_count": 1302, "id": 1078, "frequency": "f", "synset": "tennis_ball.n.01"}, {"name": "tennis_racket", "instance_count": 3035, "def": "a racket used to play tennis", "synonyms": ["tennis_racket"], "image_count": 1977, "id": 1079, "frequency": "f", "synset": "tennis_racket.n.01"}, {"name": "tequila", "instance_count": 2, "def": "Mexican liquor made from fermented juices of an agave plant", "synonyms": ["tequila"], "image_count": 2, "id": 1080, "frequency": "r", "synset": "tequila.n.01"}, {"name": "thermometer", "instance_count": 33, "def": "measuring instrument for measuring temperature", "synonyms": ["thermometer"], "image_count": 29, "id": 1081, "frequency": "c", "synset": "thermometer.n.01"}, {"name": "thermos_bottle", "instance_count": 49, "def": "vacuum flask that preserves temperature of hot or cold drinks", "synonyms": ["thermos_bottle"], "image_count": 36, "id": 1082, "frequency": "c", "synset": "thermos.n.01"}, {"name": "thermostat", "instance_count": 153, "def": "a regulator for automatically regulating temperature by starting or stopping the supply of heat", "synonyms": ["thermostat"], "image_count": 138, "id": 1083, "frequency": "f", "synset": "thermostat.n.01"}, {"name": "thimble", "instance_count": 6, "def": "a small metal cap to protect the finger while sewing; can be used as a small container", "synonyms": ["thimble"], "image_count": 4, "id": 1084, "frequency": "r", "synset": "thimble.n.02"}, {"name": "thread", "instance_count": 320, "def": "a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving", "synonyms": ["thread", "yarn"], "image_count": 67, "id": 1085, "frequency": "c", "synset": "thread.n.01"}, {"name": "thumbtack", "instance_count": 224, "def": "a tack for attaching papers to a bulletin board or drawing board", "synonyms": ["thumbtack", "drawing_pin", "pushpin"], "image_count": 26, "id": 1086, "frequency": "c", "synset": "thumbtack.n.01"}, {"name": "tiara", "instance_count": 31, "def": "a jeweled headdress worn by women on formal occasions", "synonyms": ["tiara"], "image_count": 25, "id": 1087, "frequency": "c", "synset": "tiara.n.01"}, {"name": "tiger", "instance_count": 67, "def": "large feline of forests in most of Asia having a tawny coat with black stripes", "synonyms": ["tiger"], "image_count": 33, "id": 1088, "frequency": "c", "synset": "tiger.n.02"}, {"name": "tights_(clothing)", "instance_count": 45, "def": "skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls", "synonyms": ["tights_(clothing)", "leotards"], "image_count": 37, "id": 1089, "frequency": "c", "synset": "tights.n.01"}, {"name": "timer", "instance_count": 62, "def": "a timepiece that measures a time interval and signals its end", "synonyms": ["timer", "stopwatch"], "image_count": 50, "id": 1090, "frequency": "c", "synset": "timer.n.01"}, {"name": "tinfoil", "instance_count": 421, "def": "foil made of tin or an alloy of tin and lead", "synonyms": ["tinfoil"], "image_count": 270, "id": 1091, "frequency": "f", "synset": "tinfoil.n.01"}, {"name": "tinsel", "instance_count": 70, "def": "a showy decoration that is basically valueless", "synonyms": ["tinsel"], "image_count": 12, "id": 1092, "frequency": "c", "synset": "tinsel.n.01"}, {"name": "tissue_paper", "instance_count": 587, "def": "a soft thin (usually translucent) paper", "synonyms": ["tissue_paper"], "image_count": 316, "id": 1093, "frequency": "f", "synset": "tissue.n.02"}, {"name": "toast_(food)", "instance_count": 125, "def": "slice of bread that has been toasted", "synonyms": ["toast_(food)"], "image_count": 41, "id": 1094, "frequency": "c", "synset": "toast.n.01"}, {"name": "toaster", "instance_count": 240, "def": "a kitchen appliance (usually electric) for toasting bread", "synonyms": ["toaster"], "image_count": 224, "id": 1095, "frequency": "f", "synset": "toaster.n.02"}, {"name": "toaster_oven", "instance_count": 114, "def": "kitchen appliance consisting of a small electric oven for toasting or warming food", "synonyms": ["toaster_oven"], "image_count": 105, "id": 1096, "frequency": "f", "synset": "toaster_oven.n.01"}, {"name": "toilet", "instance_count": 2295, "def": "a plumbing fixture for defecation and urination", "synonyms": ["toilet"], "image_count": 1925, "id": 1097, "frequency": "f", "synset": "toilet.n.02"}, {"name": "toilet_tissue", "instance_count": 1683, "def": "a soft thin absorbent paper for use in toilets", "synonyms": ["toilet_tissue", "toilet_paper", "bathroom_tissue"], "image_count": 1021, "id": 1098, "frequency": "f", "synset": "toilet_tissue.n.01"}, {"name": "tomato", "instance_count": 12338, "def": "mildly acid red or yellow pulpy fruit eaten as a vegetable", "synonyms": ["tomato"], "image_count": 1213, "id": 1099, "frequency": "f", "synset": "tomato.n.01"}, {"name": "tongs", "instance_count": 294, "def": "any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below", "synonyms": ["tongs"], "image_count": 172, "id": 1100, "frequency": "f", "synset": "tongs.n.01"}, {"name": "toolbox", "instance_count": 39, "def": "a box or chest or cabinet for holding hand tools", "synonyms": ["toolbox"], "image_count": 28, "id": 1101, "frequency": "c", "synset": "toolbox.n.01"}, {"name": "toothbrush", "instance_count": 1683, "def": "small brush; has long handle; used to clean teeth", "synonyms": ["toothbrush"], "image_count": 745, "id": 1102, "frequency": "f", "synset": "toothbrush.n.01"}, {"name": "toothpaste", "instance_count": 326, "def": "a dentifrice in the form of a paste", "synonyms": ["toothpaste"], "image_count": 187, "id": 1103, "frequency": "f", "synset": "toothpaste.n.01"}, {"name": "toothpick", "instance_count": 423, "def": "pick consisting of a small strip of wood or plastic; used to pick food from between the teeth", "synonyms": ["toothpick"], "image_count": 147, "id": 1104, "frequency": "f", "synset": "toothpick.n.01"}, {"name": "cover", "instance_count": 306, "def": "covering for a hole (especially a hole in the top of a container)", "synonyms": ["cover"], "image_count": 136, "id": 1105, "frequency": "f", "synset": "top.n.09"}, {"name": "tortilla", "instance_count": 135, "def": "thin unleavened pancake made from cornmeal or wheat flour", "synonyms": ["tortilla"], "image_count": 34, "id": 1106, "frequency": "c", "synset": "tortilla.n.01"}, {"name": "tow_truck", "instance_count": 45, "def": "a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)", "synonyms": ["tow_truck"], "image_count": 41, "id": 1107, "frequency": "c", "synset": "tow_truck.n.01"}, {"name": "towel", "instance_count": 2212, "def": "a rectangular piece of absorbent cloth (or paper) for drying or wiping", "synonyms": ["towel"], "image_count": 636, "id": 1108, "frequency": "f", "synset": "towel.n.01"}, {"name": "towel_rack", "instance_count": 987, "def": "a rack consisting of one or more bars on which towels can be hung", "synonyms": ["towel_rack", "towel_rail", "towel_bar"], "image_count": 570, "id": 1109, "frequency": "f", "synset": "towel_rack.n.01"}, {"name": "toy", "instance_count": 6756, "def": "a device regarded as providing amusement", "synonyms": ["toy"], "image_count": 1149, "id": 1110, "frequency": "f", "synset": "toy.n.03"}, {"name": "tractor_(farm_equipment)", "instance_count": 80, "def": "a wheeled vehicle with large wheels; used in farming and other applications", "synonyms": ["tractor_(farm_equipment)"], "image_count": 61, "id": 1111, "frequency": "c", "synset": "tractor.n.01"}, {"name": "traffic_light", "instance_count": 7298, "def": "a device to control vehicle traffic often consisting of three or more lights", "synonyms": ["traffic_light"], "image_count": 1890, "id": 1112, "frequency": "f", "synset": "traffic_light.n.01"}, {"name": "dirt_bike", "instance_count": 47, "def": "a lightweight motorcycle equipped with rugged tires and suspension for off-road use", "synonyms": ["dirt_bike"], "image_count": 18, "id": 1113, "frequency": "c", "synset": "trail_bike.n.01"}, {"name": "trailer_truck", "instance_count": 297, "def": "a truck consisting of a tractor and trailer together", "synonyms": ["trailer_truck", "tractor_trailer", "trucking_rig", "articulated_lorry", "semi_truck"], "image_count": 143, "id": 1114, "frequency": "f", "synset": "trailer_truck.n.01"}, {"name": "train_(railroad_vehicle)", "instance_count": 2192, "def": "public or private transport provided by a line of railway cars coupled together and drawn by a locomotive", "synonyms": ["train_(railroad_vehicle)", "railroad_train"], "image_count": 1517, "id": 1115, "frequency": "f", "synset": "train.n.01"}, {"name": "trampoline", "instance_count": 7, "def": "gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame", "synonyms": ["trampoline"], "image_count": 7, "id": 1116, "frequency": "r", "synset": "trampoline.n.01"}, {"name": "tray", "instance_count": 2397, "def": "an open receptacle for holding or displaying or serving articles or food", "synonyms": ["tray"], "image_count": 943, "id": 1117, "frequency": "f", "synset": "tray.n.01"}, {"name": "trench_coat", "instance_count": 16, "def": "a military style raincoat; belted with deep pockets", "synonyms": ["trench_coat"], "image_count": 6, "id": 1118, "frequency": "r", "synset": "trench_coat.n.01"}, {"name": "triangle_(musical_instrument)", "instance_count": 1, "def": "a percussion instrument consisting of a metal bar bent in the shape of an open triangle", "synonyms": ["triangle_(musical_instrument)"], "image_count": 1, "id": 1119, "frequency": "r", "synset": "triangle.n.05"}, {"name": "tricycle", "instance_count": 15, "def": "a vehicle with three wheels that is moved by foot pedals", "synonyms": ["tricycle"], "image_count": 11, "id": 1120, "frequency": "c", "synset": "tricycle.n.01"}, {"name": "tripod", "instance_count": 132, "def": "a three-legged rack used for support", "synonyms": ["tripod"], "image_count": 101, "id": 1121, "frequency": "f", "synset": "tripod.n.01"}, {"name": "trousers", "instance_count": 7806, "def": "a garment extending from the waist to the knee or ankle, covering each leg separately", "synonyms": ["trousers", "pants_(clothing)"], "image_count": 1909, "id": 1122, "frequency": "f", "synset": "trouser.n.01"}, {"name": "truck", "instance_count": 1797, "def": "an automotive vehicle suitable for hauling", "synonyms": ["truck"], "image_count": 800, "id": 1123, "frequency": "f", "synset": "truck.n.01"}, {"name": "truffle_(chocolate)", "instance_count": 4, "def": "creamy chocolate candy", "synonyms": ["truffle_(chocolate)", "chocolate_truffle"], "image_count": 1, "id": 1124, "frequency": "r", "synset": "truffle.n.03"}, {"name": "trunk", "instance_count": 334, "def": "luggage consisting of a large strong case used when traveling or for storage", "synonyms": ["trunk"], "image_count": 44, "id": 1125, "frequency": "c", "synset": "trunk.n.02"}, {"name": "vat", "instance_count": 15, "def": "a large vessel for holding or storing liquids", "synonyms": ["vat"], "image_count": 3, "id": 1126, "frequency": "r", "synset": "tub.n.02"}, {"name": "turban", "instance_count": 124, "def": "a traditional headdress consisting of a long scarf wrapped around the head", "synonyms": ["turban"], "image_count": 44, "id": 1127, "frequency": "c", "synset": "turban.n.01"}, {"name": "turkey_(food)", "instance_count": 120, "def": "flesh of large domesticated fowl usually roasted", "synonyms": ["turkey_(food)"], "image_count": 31, "id": 1128, "frequency": "c", "synset": "turkey.n.04"}, {"name": "turnip", "instance_count": 109, "def": "widely cultivated plant having a large fleshy edible white or yellow root", "synonyms": ["turnip"], "image_count": 7, "id": 1129, "frequency": "r", "synset": "turnip.n.01"}, {"name": "turtle", "instance_count": 31, "def": "any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming", "synonyms": ["turtle"], "image_count": 20, "id": 1130, "frequency": "c", "synset": "turtle.n.02"}, {"name": "turtleneck_(clothing)", "instance_count": 13, "def": "a sweater or jersey with a high close-fitting collar", "synonyms": ["turtleneck_(clothing)", "polo-neck"], "image_count": 11, "id": 1131, "frequency": "c", "synset": "turtleneck.n.01"}, {"name": "typewriter", "instance_count": 14, "def": "hand-operated character printer for printing written messages one character at a time", "synonyms": ["typewriter"], "image_count": 13, "id": 1132, "frequency": "c", "synset": "typewriter.n.01"}, {"name": "umbrella", "instance_count": 9161, "def": "a lightweight handheld collapsible canopy", "synonyms": ["umbrella"], "image_count": 1924, "id": 1133, "frequency": "f", "synset": "umbrella.n.01"}, {"name": "underwear", "instance_count": 164, "def": "undergarment worn next to the skin and under the outer garments", "synonyms": ["underwear", "underclothes", "underclothing", "underpants"], "image_count": 113, "id": 1134, "frequency": "f", "synset": "underwear.n.01"}, {"name": "unicycle", "instance_count": 2, "def": "a vehicle with a single wheel that is driven by pedals", "synonyms": ["unicycle"], "image_count": 2, "id": 1135, "frequency": "r", "synset": "unicycle.n.01"}, {"name": "urinal", "instance_count": 381, "def": "a plumbing fixture (usually attached to the wall) used by men to urinate", "synonyms": ["urinal"], "image_count": 139, "id": 1136, "frequency": "f", "synset": "urinal.n.01"}, {"name": "urn", "instance_count": 81, "def": "a large vase that usually has a pedestal or feet", "synonyms": ["urn"], "image_count": 12, "id": 1137, "frequency": "c", "synset": "urn.n.01"}, {"name": "vacuum_cleaner", "instance_count": 38, "def": "an electrical home appliance that cleans by suction", "synonyms": ["vacuum_cleaner"], "image_count": 37, "id": 1138, "frequency": "c", "synset": "vacuum.n.04"}, {"name": "vase", "instance_count": 4971, "def": "an open jar of glass or porcelain used as an ornament or to hold flowers", "synonyms": ["vase"], "image_count": 1866, "id": 1139, "frequency": "f", "synset": "vase.n.01"}, {"name": "vending_machine", "instance_count": 65, "def": "a slot machine for selling goods", "synonyms": ["vending_machine"], "image_count": 47, "id": 1140, "frequency": "c", "synset": "vending_machine.n.01"}, {"name": "vent", "instance_count": 3370, "def": "a hole for the escape of gas or air", "synonyms": ["vent", "blowhole", "air_vent"], "image_count": 1468, "id": 1141, "frequency": "f", "synset": "vent.n.01"}, {"name": "vest", "instance_count": 1313, "def": "a man's sleeveless garment worn underneath a coat", "synonyms": ["vest", "waistcoat"], "image_count": 729, "id": 1142, "frequency": "f", "synset": "vest.n.01"}, {"name": "videotape", "instance_count": 228, "def": "a video recording made on magnetic tape", "synonyms": ["videotape"], "image_count": 24, "id": 1143, "frequency": "c", "synset": "videotape.n.01"}, {"name": "vinegar", "instance_count": 1, "def": "sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative", "synonyms": ["vinegar"], "image_count": 1, "id": 1144, "frequency": "r", "synset": "vinegar.n.01"}, {"name": "violin", "instance_count": 10, "def": "bowed stringed instrument that is the highest member of the violin family", "synonyms": ["violin", "fiddle"], "image_count": 10, "id": 1145, "frequency": "r", "synset": "violin.n.01"}, {"name": "vodka", "instance_count": 3, "def": "unaged colorless liquor originating in Russia", "synonyms": ["vodka"], "image_count": 3, "id": 1146, "frequency": "r", "synset": "vodka.n.01"}, {"name": "volleyball", "instance_count": 33, "def": "an inflated ball used in playing volleyball", "synonyms": ["volleyball"], "image_count": 14, "id": 1147, "frequency": "c", "synset": "volleyball.n.02"}, {"name": "vulture", "instance_count": 16, "def": "any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion", "synonyms": ["vulture"], "image_count": 4, "id": 1148, "frequency": "r", "synset": "vulture.n.01"}, {"name": "waffle", "instance_count": 61, "def": "pancake batter baked in a waffle iron", "synonyms": ["waffle"], "image_count": 29, "id": 1149, "frequency": "c", "synset": "waffle.n.01"}, {"name": "waffle_iron", "instance_count": 4, "def": "a kitchen appliance for baking waffles", "synonyms": ["waffle_iron"], "image_count": 4, "id": 1150, "frequency": "r", "synset": "waffle_iron.n.01"}, {"name": "wagon", "instance_count": 121, "def": "any of various kinds of wheeled vehicles drawn by an animal or a tractor", "synonyms": ["wagon"], "image_count": 70, "id": 1151, "frequency": "c", "synset": "wagon.n.01"}, {"name": "wagon_wheel", "instance_count": 209, "def": "a wheel of a wagon", "synonyms": ["wagon_wheel"], "image_count": 46, "id": 1152, "frequency": "c", "synset": "wagon_wheel.n.01"}, {"name": "walking_stick", "instance_count": 21, "def": "a stick carried in the hand for support in walking", "synonyms": ["walking_stick"], "image_count": 14, "id": 1153, "frequency": "c", "synset": "walking_stick.n.01"}, {"name": "wall_clock", "instance_count": 100, "def": "a clock mounted on a wall", "synonyms": ["wall_clock"], "image_count": 48, "id": 1154, "frequency": "c", "synset": "wall_clock.n.01"}, {"name": "wall_socket", "instance_count": 3069, "def": "receptacle providing a place in a wiring system where current can be taken to run electrical devices", "synonyms": ["wall_socket", "wall_plug", "electric_outlet", "electrical_outlet", "outlet", "electric_receptacle"], "image_count": 1855, "id": 1155, "frequency": "f", "synset": "wall_socket.n.01"}, {"name": "wallet", "instance_count": 123, "def": "a pocket-size case for holding papers and paper money", "synonyms": ["wallet", "billfold"], "image_count": 113, "id": 1156, "frequency": "f", "synset": "wallet.n.01"}, {"name": "walrus", "instance_count": 1, "def": "either of two large northern marine mammals having ivory tusks and tough hide over thick blubber", "synonyms": ["walrus"], "image_count": 1, "id": 1157, "frequency": "r", "synset": "walrus.n.01"}, {"name": "wardrobe", "instance_count": 1, "def": "a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes", "synonyms": ["wardrobe"], "image_count": 1, "id": 1158, "frequency": "r", "synset": "wardrobe.n.01"}, {"name": "washbasin", "instance_count": 15, "def": "a bathroom sink that is permanently installed and connected to a water supply and drainpipe; where you can wash your hands and face", "synonyms": ["washbasin", "basin_(for_washing)", "washbowl", "washstand", "handbasin"], "image_count": 10, "id": 1159, "frequency": "r", "synset": "washbasin.n.01"}, {"name": "automatic_washer", "instance_count": 68, "def": "a home appliance for washing clothes and linens automatically", "synonyms": ["automatic_washer", "washing_machine"], "image_count": 54, "id": 1160, "frequency": "c", "synset": "washer.n.03"}, {"name": "watch", "instance_count": 2703, "def": "a small, portable timepiece", "synonyms": ["watch", "wristwatch"], "image_count": 1923, "id": 1161, "frequency": "f", "synset": "watch.n.01"}, {"name": "water_bottle", "instance_count": 1449, "def": "a bottle for holding water", "synonyms": ["water_bottle"], "image_count": 630, "id": 1162, "frequency": "f", "synset": "water_bottle.n.01"}, {"name": "water_cooler", "instance_count": 39, "def": "a device for cooling and dispensing drinking water", "synonyms": ["water_cooler"], "image_count": 31, "id": 1163, "frequency": "c", "synset": "water_cooler.n.01"}, {"name": "water_faucet", "instance_count": 109, "def": "a faucet for drawing water from a pipe or cask", "synonyms": ["water_faucet", "water_tap", "tap_(water_faucet)"], "image_count": 69, "id": 1164, "frequency": "c", "synset": "water_faucet.n.01"}, {"name": "water_heater", "instance_count": 7, "def": "a heater and storage tank to supply heated water", "synonyms": ["water_heater", "hot-water_heater"], "image_count": 7, "id": 1165, "frequency": "r", "synset": "water_heater.n.01"}, {"name": "water_jug", "instance_count": 23, "def": "a jug that holds water", "synonyms": ["water_jug"], "image_count": 11, "id": 1166, "frequency": "c", "synset": "water_jug.n.01"}, {"name": "water_gun", "instance_count": 1, "def": "plaything consisting of a toy pistol that squirts water", "synonyms": ["water_gun", "squirt_gun"], "image_count": 1, "id": 1167, "frequency": "r", "synset": "water_pistol.n.01"}, {"name": "water_scooter", "instance_count": 54, "def": "a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)", "synonyms": ["water_scooter", "sea_scooter", "jet_ski"], "image_count": 30, "id": 1168, "frequency": "c", "synset": "water_scooter.n.01"}, {"name": "water_ski", "instance_count": 98, "def": "broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)", "synonyms": ["water_ski"], "image_count": 50, "id": 1169, "frequency": "c", "synset": "water_ski.n.01"}, {"name": "water_tower", "instance_count": 60, "def": "a large reservoir for water", "synonyms": ["water_tower"], "image_count": 45, "id": 1170, "frequency": "c", "synset": "water_tower.n.01"}, {"name": "watering_can", "instance_count": 44, "def": "a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants", "synonyms": ["watering_can"], "image_count": 28, "id": 1171, "frequency": "c", "synset": "watering_can.n.01"}, {"name": "watermelon", "instance_count": 814, "def": "large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp", "synonyms": ["watermelon"], "image_count": 114, "id": 1172, "frequency": "f", "synset": "watermelon.n.02"}, {"name": "weathervane", "instance_count": 237, "def": "mechanical device attached to an elevated structure; rotates freely to show the direction of the wind", "synonyms": ["weathervane", "vane_(weathervane)", "wind_vane"], "image_count": 193, "id": 1173, "frequency": "f", "synset": "weathervane.n.01"}, {"name": "webcam", "instance_count": 27, "def": "a digital camera designed to take digital photographs and transmit them over the internet", "synonyms": ["webcam"], "image_count": 21, "id": 1174, "frequency": "c", "synset": "webcam.n.01"}, {"name": "wedding_cake", "instance_count": 140, "def": "a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception", "synonyms": ["wedding_cake", "bridecake"], "image_count": 91, "id": 1175, "frequency": "c", "synset": "wedding_cake.n.01"}, {"name": "wedding_ring", "instance_count": 49, "def": "a ring given to the bride and/or groom at the wedding", "synonyms": ["wedding_ring", "wedding_band"], "image_count": 31, "id": 1176, "frequency": "c", "synset": "wedding_ring.n.01"}, {"name": "wet_suit", "instance_count": 2907, "def": "a close-fitting garment made of a permeable material; worn in cold water to retain body heat", "synonyms": ["wet_suit"], "image_count": 1469, "id": 1177, "frequency": "f", "synset": "wet_suit.n.01"}, {"name": "wheel", "instance_count": 11272, "def": "a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle", "synonyms": ["wheel"], "image_count": 1924, "id": 1178, "frequency": "f", "synset": "wheel.n.01"}, {"name": "wheelchair", "instance_count": 107, "def": "a movable chair mounted on large wheels", "synonyms": ["wheelchair"], "image_count": 87, "id": 1179, "frequency": "c", "synset": "wheelchair.n.01"}, {"name": "whipped_cream", "instance_count": 201, "def": "cream that has been beaten until light and fluffy", "synonyms": ["whipped_cream"], "image_count": 77, "id": 1180, "frequency": "c", "synset": "whipped_cream.n.01"}, {"name": "whistle", "instance_count": 13, "def": "a small wind instrument that produces a whistling sound by blowing into it", "synonyms": ["whistle"], "image_count": 11, "id": 1181, "frequency": "c", "synset": "whistle.n.03"}, {"name": "wig", "instance_count": 69, "def": "hairpiece covering the head and made of real or synthetic hair", "synonyms": ["wig"], "image_count": 47, "id": 1182, "frequency": "c", "synset": "wig.n.01"}, {"name": "wind_chime", "instance_count": 28, "def": "a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle", "synonyms": ["wind_chime"], "image_count": 21, "id": 1183, "frequency": "c", "synset": "wind_chime.n.01"}, {"name": "windmill", "instance_count": 202, "def": "A mill or turbine that is powered by wind", "synonyms": ["windmill"], "image_count": 47, "id": 1184, "frequency": "c", "synset": "windmill.n.01"}, {"name": "window_box_(for_plants)", "instance_count": 253, "def": "a container for growing plants on a windowsill", "synonyms": ["window_box_(for_plants)"], "image_count": 70, "id": 1185, "frequency": "c", "synset": "window_box.n.01"}, {"name": "windshield_wiper", "instance_count": 4793, "def": "a mechanical device that cleans the windshield", "synonyms": ["windshield_wiper", "windscreen_wiper", "wiper_(for_windshield/screen)"], "image_count": 1838, "id": 1186, "frequency": "f", "synset": "windshield_wiper.n.01"}, {"name": "windsock", "instance_count": 26, "def": "a truncated cloth cone mounted on a mast/pole; shows wind direction", "synonyms": ["windsock", "air_sock", "air-sleeve", "wind_sleeve", "wind_cone"], "image_count": 19, "id": 1187, "frequency": "c", "synset": "windsock.n.01"}, {"name": "wine_bottle", "instance_count": 4449, "def": "a bottle for holding wine", "synonyms": ["wine_bottle"], "image_count": 531, "id": 1188, "frequency": "f", "synset": "wine_bottle.n.01"}, {"name": "wine_bucket", "instance_count": 21, "def": "a bucket of ice used to chill a bottle of wine", "synonyms": ["wine_bucket", "wine_cooler"], "image_count": 11, "id": 1189, "frequency": "c", "synset": "wine_bucket.n.01"}, {"name": "wineglass", "instance_count": 4259, "def": "a glass that has a stem and in which wine is served", "synonyms": ["wineglass"], "image_count": 941, "id": 1190, "frequency": "f", "synset": "wineglass.n.01"}, {"name": "blinder_(for_horses)", "instance_count": 271, "def": "blinds that prevent a horse from seeing something on either side", "synonyms": ["blinder_(for_horses)"], "image_count": 113, "id": 1191, "frequency": "f", "synset": "winker.n.02"}, {"name": "wok", "instance_count": 60, "def": "pan with a convex bottom; used for frying in Chinese cooking", "synonyms": ["wok"], "image_count": 26, "id": 1192, "frequency": "c", "synset": "wok.n.01"}, {"name": "wolf", "instance_count": 16, "def": "a wild carnivorous mammal of the dog family, living and hunting in packs", "synonyms": ["wolf"], "image_count": 5, "id": 1193, "frequency": "r", "synset": "wolf.n.01"}, {"name": "wooden_spoon", "instance_count": 123, "def": "a spoon made of wood", "synonyms": ["wooden_spoon"], "image_count": 56, "id": 1194, "frequency": "c", "synset": "wooden_spoon.n.02"}, {"name": "wreath", "instance_count": 119, "def": "an arrangement of flowers, leaves, or stems fastened in a ring", "synonyms": ["wreath"], "image_count": 73, "id": 1195, "frequency": "c", "synset": "wreath.n.01"}, {"name": "wrench", "instance_count": 80, "def": "a hand tool that is used to hold or twist a nut or bolt", "synonyms": ["wrench", "spanner"], "image_count": 32, "id": 1196, "frequency": "c", "synset": "wrench.n.03"}, {"name": "wristband", "instance_count": 268, "def": "band consisting of a part of a sleeve that covers the wrist", "synonyms": ["wristband"], "image_count": 128, "id": 1197, "frequency": "f", "synset": "wristband.n.01"}, {"name": "wristlet", "instance_count": 1330, "def": "a band or bracelet worn around the wrist", "synonyms": ["wristlet", "wrist_band"], "image_count": 623, "id": 1198, "frequency": "f", "synset": "wristlet.n.01"}, {"name": "yacht", "instance_count": 50, "def": "an expensive vessel propelled by sail or power and used for cruising or racing", "synonyms": ["yacht"], "image_count": 12, "id": 1199, "frequency": "c", "synset": "yacht.n.01"}, {"name": "yogurt", "instance_count": 116, "def": "a custard-like food made from curdled milk", "synonyms": ["yogurt", "yoghurt", "yoghourt"], "image_count": 52, "id": 1200, "frequency": "c", "synset": "yogurt.n.01"}, {"name": "yoke_(animal_equipment)", "instance_count": 20, "def": "gear joining two animals at the neck; NOT egg yolk", "synonyms": ["yoke_(animal_equipment)"], "image_count": 11, "id": 1201, "frequency": "c", "synset": "yoke.n.07"}, {"name": "zebra", "instance_count": 5443, "def": "any of several fleet black-and-white striped African equines", "synonyms": ["zebra"], "image_count": 1674, "id": 1202, "frequency": "f", "synset": "zebra.n.01"}, {"name": "zucchini", "instance_count": 798, "def": "small cucumber-shaped vegetable marrow; typically dark green", "synonyms": ["zucchini", "courgette"], "image_count": 81, "id": 1203, "frequency": "c", "synset": "zucchini.n.02"}]
\ No newline at end of file
diff --git a/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_ade20k_sem_seg.py b/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_ade20k_sem_seg.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b4a58d8f2877544498e328b6d269f23aa1eb59f
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_ade20k_sem_seg.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+import numpy as np
+import os
+from pathlib import Path
+import tqdm
+from PIL import Image
+
+
+def convert(input, output):
+ img = np.asarray(Image.open(input))
+ assert img.dtype == np.uint8
+ img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1
+ Image.fromarray(img).save(output)
+
+
+if __name__ == "__main__":
+ dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016"
+ for name in ["training", "validation"]:
+ annotation_dir = dataset_dir / "annotations" / name
+ output_dir = dataset_dir / "annotations_detectron2" / name
+ output_dir.mkdir(parents=True, exist_ok=True)
+ for file in tqdm.tqdm(list(annotation_dir.iterdir())):
+ output_file = output_dir / file.name
+ convert(file, output_file)
diff --git a/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_cocofied_lvis.py b/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_cocofied_lvis.py
new file mode 100644
index 0000000000000000000000000000000000000000..245c88482a9e2405e5a912b5c560aed78a614a13
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_cocofied_lvis.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import copy
+import json
+import os
+from collections import defaultdict
+
+# This mapping is extracted from the official LVIS mapping:
+# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
+COCO_SYNSET_CATEGORIES = [
+ {"synset": "person.n.01", "coco_cat_id": 1},
+ {"synset": "bicycle.n.01", "coco_cat_id": 2},
+ {"synset": "car.n.01", "coco_cat_id": 3},
+ {"synset": "motorcycle.n.01", "coco_cat_id": 4},
+ {"synset": "airplane.n.01", "coco_cat_id": 5},
+ {"synset": "bus.n.01", "coco_cat_id": 6},
+ {"synset": "train.n.01", "coco_cat_id": 7},
+ {"synset": "truck.n.01", "coco_cat_id": 8},
+ {"synset": "boat.n.01", "coco_cat_id": 9},
+ {"synset": "traffic_light.n.01", "coco_cat_id": 10},
+ {"synset": "fireplug.n.01", "coco_cat_id": 11},
+ {"synset": "stop_sign.n.01", "coco_cat_id": 13},
+ {"synset": "parking_meter.n.01", "coco_cat_id": 14},
+ {"synset": "bench.n.01", "coco_cat_id": 15},
+ {"synset": "bird.n.01", "coco_cat_id": 16},
+ {"synset": "cat.n.01", "coco_cat_id": 17},
+ {"synset": "dog.n.01", "coco_cat_id": 18},
+ {"synset": "horse.n.01", "coco_cat_id": 19},
+ {"synset": "sheep.n.01", "coco_cat_id": 20},
+ {"synset": "beef.n.01", "coco_cat_id": 21},
+ {"synset": "elephant.n.01", "coco_cat_id": 22},
+ {"synset": "bear.n.01", "coco_cat_id": 23},
+ {"synset": "zebra.n.01", "coco_cat_id": 24},
+ {"synset": "giraffe.n.01", "coco_cat_id": 25},
+ {"synset": "backpack.n.01", "coco_cat_id": 27},
+ {"synset": "umbrella.n.01", "coco_cat_id": 28},
+ {"synset": "bag.n.04", "coco_cat_id": 31},
+ {"synset": "necktie.n.01", "coco_cat_id": 32},
+ {"synset": "bag.n.06", "coco_cat_id": 33},
+ {"synset": "frisbee.n.01", "coco_cat_id": 34},
+ {"synset": "ski.n.01", "coco_cat_id": 35},
+ {"synset": "snowboard.n.01", "coco_cat_id": 36},
+ {"synset": "ball.n.06", "coco_cat_id": 37},
+ {"synset": "kite.n.03", "coco_cat_id": 38},
+ {"synset": "baseball_bat.n.01", "coco_cat_id": 39},
+ {"synset": "baseball_glove.n.01", "coco_cat_id": 40},
+ {"synset": "skateboard.n.01", "coco_cat_id": 41},
+ {"synset": "surfboard.n.01", "coco_cat_id": 42},
+ {"synset": "tennis_racket.n.01", "coco_cat_id": 43},
+ {"synset": "bottle.n.01", "coco_cat_id": 44},
+ {"synset": "wineglass.n.01", "coco_cat_id": 46},
+ {"synset": "cup.n.01", "coco_cat_id": 47},
+ {"synset": "fork.n.01", "coco_cat_id": 48},
+ {"synset": "knife.n.01", "coco_cat_id": 49},
+ {"synset": "spoon.n.01", "coco_cat_id": 50},
+ {"synset": "bowl.n.03", "coco_cat_id": 51},
+ {"synset": "banana.n.02", "coco_cat_id": 52},
+ {"synset": "apple.n.01", "coco_cat_id": 53},
+ {"synset": "sandwich.n.01", "coco_cat_id": 54},
+ {"synset": "orange.n.01", "coco_cat_id": 55},
+ {"synset": "broccoli.n.01", "coco_cat_id": 56},
+ {"synset": "carrot.n.01", "coco_cat_id": 57},
+ {"synset": "frank.n.02", "coco_cat_id": 58},
+ {"synset": "pizza.n.01", "coco_cat_id": 59},
+ {"synset": "doughnut.n.02", "coco_cat_id": 60},
+ {"synset": "cake.n.03", "coco_cat_id": 61},
+ {"synset": "chair.n.01", "coco_cat_id": 62},
+ {"synset": "sofa.n.01", "coco_cat_id": 63},
+ {"synset": "pot.n.04", "coco_cat_id": 64},
+ {"synset": "bed.n.01", "coco_cat_id": 65},
+ {"synset": "dining_table.n.01", "coco_cat_id": 67},
+ {"synset": "toilet.n.02", "coco_cat_id": 70},
+ {"synset": "television_receiver.n.01", "coco_cat_id": 72},
+ {"synset": "laptop.n.01", "coco_cat_id": 73},
+ {"synset": "mouse.n.04", "coco_cat_id": 74},
+ {"synset": "remote_control.n.01", "coco_cat_id": 75},
+ {"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
+ {"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
+ {"synset": "microwave.n.02", "coco_cat_id": 78},
+ {"synset": "oven.n.01", "coco_cat_id": 79},
+ {"synset": "toaster.n.02", "coco_cat_id": 80},
+ {"synset": "sink.n.01", "coco_cat_id": 81},
+ {"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
+ {"synset": "book.n.01", "coco_cat_id": 84},
+ {"synset": "clock.n.01", "coco_cat_id": 85},
+ {"synset": "vase.n.01", "coco_cat_id": 86},
+ {"synset": "scissors.n.01", "coco_cat_id": 87},
+ {"synset": "teddy.n.01", "coco_cat_id": 88},
+ {"synset": "hand_blower.n.01", "coco_cat_id": 89},
+ {"synset": "toothbrush.n.01", "coco_cat_id": 90},
+]
+
+
+def cocofy_lvis(input_filename, output_filename):
+ """
+ Filter LVIS instance segmentation annotations to remove all categories that are not included in
+ COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
+ the output json are the incontiguous COCO dataset ids.
+
+ Args:
+ input_filename (str): path to the LVIS json file.
+ output_filename (str): path to the COCOfied json file.
+ """
+
+ with open(input_filename, "r") as f:
+ lvis_json = json.load(f)
+
+ lvis_annos = lvis_json.pop("annotations")
+ cocofied_lvis = copy.deepcopy(lvis_json)
+ lvis_json["annotations"] = lvis_annos
+
+ # Mapping from lvis cat id to coco cat id via synset
+ lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
+ synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
+ # Synsets that we will keep in the dataset
+ synsets_to_keep = set(synset_to_coco_cat_id.keys())
+ coco_cat_id_with_instances = defaultdict(int)
+
+ new_annos = []
+ ann_id = 1
+ for ann in lvis_annos:
+ lvis_cat_id = ann["category_id"]
+ synset = lvis_cat_id_to_synset[lvis_cat_id]
+ if synset not in synsets_to_keep:
+ continue
+ coco_cat_id = synset_to_coco_cat_id[synset]
+ new_ann = copy.deepcopy(ann)
+ new_ann["category_id"] = coco_cat_id
+ new_ann["id"] = ann_id
+ ann_id += 1
+ new_annos.append(new_ann)
+ coco_cat_id_with_instances[coco_cat_id] += 1
+ cocofied_lvis["annotations"] = new_annos
+
+ for image in cocofied_lvis["images"]:
+ for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
+ new_category_list = []
+ for lvis_cat_id in image[key]:
+ synset = lvis_cat_id_to_synset[lvis_cat_id]
+ if synset not in synsets_to_keep:
+ continue
+ coco_cat_id = synset_to_coco_cat_id[synset]
+ new_category_list.append(coco_cat_id)
+ coco_cat_id_with_instances[coco_cat_id] += 1
+ image[key] = new_category_list
+
+ coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())
+
+ new_categories = []
+ for cat in lvis_json["categories"]:
+ synset = cat["synset"]
+ if synset not in synsets_to_keep:
+ continue
+ coco_cat_id = synset_to_coco_cat_id[synset]
+ if coco_cat_id not in coco_cat_id_with_instances:
+ continue
+ new_cat = copy.deepcopy(cat)
+ new_cat["id"] = coco_cat_id
+ new_categories.append(new_cat)
+ cocofied_lvis["categories"] = new_categories
+
+ with open(output_filename, "w") as f:
+ json.dump(cocofied_lvis, f)
+ print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))
+
+
+if __name__ == "__main__":
+ dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
+ for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
+ print("Start COCOfing {}.".format(s))
+ cocofy_lvis(
+ os.path.join(dataset_dir, "{}.json".format(s)),
+ os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
+ )
diff --git a/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_for_tests.sh b/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_for_tests.sh
new file mode 100644
index 0000000000000000000000000000000000000000..67e875a41da652b2fcae6631b76d94584935ddb9
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_for_tests.sh
@@ -0,0 +1,31 @@
+#!/bin/bash -e
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+# Download the mini dataset (coco val2017_100, with only 100 images)
+# to be used in unittests & integration tests.
+
+cd "${0%/*}"
+
+BASE=https://dl.fbaipublicfiles.com/detectron2
+ROOT=${DETECTRON2_DATASETS:-./}
+ROOT=${ROOT/#\~/$HOME} # expand ~ to HOME
+mkdir -p $ROOT/coco/annotations
+
+for anno in instances_val2017_100 \
+ person_keypoints_val2017_100 ; do
+
+ dest=$ROOT/coco/annotations/$anno.json
+ [[ -s $dest ]] && {
+ echo "$dest exists. Skipping ..."
+ } || {
+ wget $BASE/annotations/coco/$anno.json -O $dest
+ }
+done
+
+dest=$ROOT/coco/val2017_100.tgz
+[[ -d $ROOT/coco/val2017 ]] && {
+ echo "$ROOT/coco/val2017 exists. Skipping ..."
+} || {
+ wget $BASE/annotations/coco/val2017_100.tgz -O $dest
+ tar xzf $dest -C $ROOT/coco/ && rm -f $dest
+}
diff --git a/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_panoptic_fpn.py b/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_panoptic_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..597d791afab1bcc0013203a66c7fba225065eebe
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/datasets/prepare_panoptic_fpn.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import functools
+import json
+import multiprocessing as mp
+import numpy as np
+import os
+import time
+from fvcore.common.download import download
+from panopticapi.utils import rgb2id
+from PIL import Image
+
+from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
+
+
+def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map):
+ panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32)
+ panoptic = rgb2id(panoptic)
+ output = np.zeros_like(panoptic, dtype=np.uint8) + 255
+ for seg in segments:
+ cat_id = seg["category_id"]
+ new_cat_id = id_map[cat_id]
+ output[panoptic == seg["id"]] = new_cat_id
+ Image.fromarray(output).save(output_semantic)
+
+
+def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories):
+ """
+ Create semantic segmentation annotations from panoptic segmentation
+ annotations, to be used by PanopticFPN.
+
+ It maps all thing categories to class 0, and maps all unlabeled pixels to class 255.
+ It maps all stuff categories to contiguous ids starting from 1.
+
+ Args:
+ panoptic_json (str): path to the panoptic json file, in COCO's format.
+ panoptic_root (str): a directory with panoptic annotation files, in COCO's format.
+ sem_seg_root (str): a directory to output semantic annotation files
+ categories (list[dict]): category metadata. Each dict needs to have:
+ "id": corresponds to the "category_id" in the json annotations
+ "isthing": 0 or 1
+ """
+ os.makedirs(sem_seg_root, exist_ok=True)
+
+ stuff_ids = [k["id"] for k in categories if k["isthing"] == 0]
+ thing_ids = [k["id"] for k in categories if k["isthing"] == 1]
+ id_map = {} # map from category id to id in the output semantic annotation
+ assert len(stuff_ids) <= 254
+ for i, stuff_id in enumerate(stuff_ids):
+ id_map[stuff_id] = i + 1
+ for thing_id in thing_ids:
+ id_map[thing_id] = 0
+ id_map[0] = 255
+
+ with open(panoptic_json) as f:
+ obj = json.load(f)
+
+ pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4))
+
+ def iter_annotations():
+ for anno in obj["annotations"]:
+ file_name = anno["file_name"]
+ segments = anno["segments_info"]
+ input = os.path.join(panoptic_root, file_name)
+ output = os.path.join(sem_seg_root, file_name)
+ yield input, output, segments
+
+ print("Start writing to {} ...".format(sem_seg_root))
+ start = time.time()
+ pool.starmap(
+ functools.partial(_process_panoptic_to_semantic, id_map=id_map),
+ iter_annotations(),
+ chunksize=100,
+ )
+ print("Finished. time: {:.2f}s".format(time.time() - start))
+
+
+if __name__ == "__main__":
+ dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco")
+ for s in ["val2017", "train2017"]:
+ separate_coco_semantic_from_panoptic(
+ os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)),
+ os.path.join(dataset_dir, "panoptic_{}".format(s)),
+ os.path.join(dataset_dir, "panoptic_stuff_{}".format(s)),
+ COCO_CATEGORIES,
+ )
+
+ # Prepare val2017_100 for quick testing:
+
+ dest_dir = os.path.join(dataset_dir, "annotations/")
+ URL_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
+ download(URL_PREFIX + "annotations/coco/panoptic_val2017_100.json", dest_dir)
+ with open(os.path.join(dest_dir, "panoptic_val2017_100.json")) as f:
+ obj = json.load(f)
+
+ def link_val100(dir_full, dir_100):
+ print("Creating " + dir_100 + " ...")
+ os.makedirs(dir_100, exist_ok=True)
+ for img in obj["images"]:
+ basename = os.path.splitext(img["file_name"])[0]
+ src = os.path.join(dir_full, basename + ".png")
+ dst = os.path.join(dir_100, basename + ".png")
+ src = os.path.relpath(src, start=dir_100)
+ os.symlink(src, dst)
+
+ link_val100(
+ os.path.join(dataset_dir, "panoptic_val2017"),
+ os.path.join(dataset_dir, "panoptic_val2017_100"),
+ )
+
+ link_val100(
+ os.path.join(dataset_dir, "panoptic_stuff_val2017"),
+ os.path.join(dataset_dir, "panoptic_stuff_val2017_100"),
+ )
diff --git a/model/vision/grit_src/third_party/CenterNet2/demo/README.md b/model/vision/grit_src/third_party/CenterNet2/demo/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..133d8d38e5e9f5f44aca92c59f73309e166d7132
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/demo/README.md
@@ -0,0 +1,8 @@
+
+## Detectron2 Demo
+
+We provide a command line tool to run a simple demo of builtin configs.
+The usage is explained in [GETTING_STARTED.md](../GETTING_STARTED.md).
+
+See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-)
+for a high-quality demo generated with this tool.
diff --git a/model/vision/grit_src/third_party/CenterNet2/demo/demo.py b/model/vision/grit_src/third_party/CenterNet2/demo/demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..4baa8767f7b299f18253aadb15a9bac5b9cc07fc
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/demo/demo.py
@@ -0,0 +1,188 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import argparse
+import glob
+import multiprocessing as mp
+import numpy as np
+import os
+import tempfile
+import time
+import warnings
+import cv2
+import tqdm
+
+from detectron2.config import get_cfg
+from detectron2.data.detection_utils import read_image
+from detectron2.utils.logger import setup_logger
+
+from predictor import VisualizationDemo
+
+# constants
+WINDOW_NAME = "COCO detections"
+
+
+def setup_cfg(args):
+ # load config from file and command-line arguments
+ cfg = get_cfg()
+ # To use demo for Panoptic-DeepLab, please uncomment the following two lines.
+ # from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
+ # add_panoptic_deeplab_config(cfg)
+ cfg.merge_from_file(args.config_file)
+ cfg.merge_from_list(args.opts)
+ # Set score_threshold for builtin models
+ cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
+ cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
+ cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
+ cfg.freeze()
+ return cfg
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
+ parser.add_argument(
+ "--config-file",
+ default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
+ metavar="FILE",
+ help="path to config file",
+ )
+ parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
+ parser.add_argument("--video-input", help="Path to video file.")
+ parser.add_argument(
+ "--input",
+ nargs="+",
+ help="A list of space separated input images; "
+ "or a single glob pattern such as 'directory/*.jpg'",
+ )
+ parser.add_argument(
+ "--output",
+ help="A file or directory to save output visualizations. "
+ "If not given, will show output in an OpenCV window.",
+ )
+
+ parser.add_argument(
+ "--confidence-threshold",
+ type=float,
+ default=0.5,
+ help="Minimum score for instance predictions to be shown",
+ )
+ parser.add_argument(
+ "--opts",
+ help="Modify config options using the command-line 'KEY VALUE' pairs",
+ default=[],
+ nargs=argparse.REMAINDER,
+ )
+ return parser
+
+
+def test_opencv_video_format(codec, file_ext):
+ with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
+ filename = os.path.join(dir, "test_file" + file_ext)
+ writer = cv2.VideoWriter(
+ filename=filename,
+ fourcc=cv2.VideoWriter_fourcc(*codec),
+ fps=float(30),
+ frameSize=(10, 10),
+ isColor=True,
+ )
+ [writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
+ writer.release()
+ if os.path.isfile(filename):
+ return True
+ return False
+
+
+if __name__ == "__main__":
+ mp.set_start_method("spawn", force=True)
+ args = get_parser().parse_args()
+ setup_logger(name="fvcore")
+ logger = setup_logger()
+ logger.info("Arguments: " + str(args))
+
+ cfg = setup_cfg(args)
+
+ demo = VisualizationDemo(cfg)
+
+ if args.input:
+ if len(args.input) == 1:
+ args.input = glob.glob(os.path.expanduser(args.input[0]))
+ assert args.input, "The input path(s) was not found"
+ for path in tqdm.tqdm(args.input, disable=not args.output):
+ # use PIL, to be consistent with evaluation
+ img = read_image(path, format="BGR")
+ start_time = time.time()
+ predictions, visualized_output = demo.run_on_image(img)
+ logger.info(
+ "{}: {} in {:.2f}s".format(
+ path,
+ "detected {} instances".format(len(predictions["instances"]))
+ if "instances" in predictions
+ else "finished",
+ time.time() - start_time,
+ )
+ )
+
+ if args.output:
+ if os.path.isdir(args.output):
+ assert os.path.isdir(args.output), args.output
+ out_filename = os.path.join(args.output, os.path.basename(path))
+ else:
+ assert len(args.input) == 1, "Please specify a directory with args.output"
+ out_filename = args.output
+ visualized_output.save(out_filename)
+ else:
+ cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
+ cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
+ if cv2.waitKey(0) == 27:
+ break # esc to quit
+ elif args.webcam:
+ assert args.input is None, "Cannot have both --input and --webcam!"
+ assert args.output is None, "output not yet supported with --webcam!"
+ cam = cv2.VideoCapture(0)
+ for vis in tqdm.tqdm(demo.run_on_video(cam)):
+ cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
+ cv2.imshow(WINDOW_NAME, vis)
+ if cv2.waitKey(1) == 27:
+ break # esc to quit
+ cam.release()
+ cv2.destroyAllWindows()
+ elif args.video_input:
+ video = cv2.VideoCapture(args.video_input)
+ width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
+ height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ frames_per_second = video.get(cv2.CAP_PROP_FPS)
+ num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+ basename = os.path.basename(args.video_input)
+ codec, file_ext = (
+ ("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
+ )
+ if codec == ".mp4v":
+ warnings.warn("x264 codec not available, switching to mp4v")
+ if args.output:
+ if os.path.isdir(args.output):
+ output_fname = os.path.join(args.output, basename)
+ output_fname = os.path.splitext(output_fname)[0] + file_ext
+ else:
+ output_fname = args.output
+ assert not os.path.isfile(output_fname), output_fname
+ output_file = cv2.VideoWriter(
+ filename=output_fname,
+ # some installation of opencv may not support x264 (due to its license),
+ # you can try other format (e.g. MPEG)
+ fourcc=cv2.VideoWriter_fourcc(*codec),
+ fps=float(frames_per_second),
+ frameSize=(width, height),
+ isColor=True,
+ )
+ assert os.path.isfile(args.video_input)
+ for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
+ if args.output:
+ output_file.write(vis_frame)
+ else:
+ cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
+ cv2.imshow(basename, vis_frame)
+ if cv2.waitKey(1) == 27:
+ break # esc to quit
+ video.release()
+ if args.output:
+ output_file.release()
+ else:
+ cv2.destroyAllWindows()
diff --git a/model/vision/grit_src/third_party/CenterNet2/demo/predictor.py b/model/vision/grit_src/third_party/CenterNet2/demo/predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b7ebd3f846850172c1f560f8492d51e5667f76d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/demo/predictor.py
@@ -0,0 +1,220 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import atexit
+import bisect
+import multiprocessing as mp
+from collections import deque
+import cv2
+import torch
+
+from detectron2.data import MetadataCatalog
+from detectron2.engine.defaults import DefaultPredictor
+from detectron2.utils.video_visualizer import VideoVisualizer
+from detectron2.utils.visualizer import ColorMode, Visualizer
+
+
+class VisualizationDemo(object):
+ def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
+ """
+ Args:
+ cfg (CfgNode):
+ instance_mode (ColorMode):
+ parallel (bool): whether to run the model in different processes from visualization.
+ Useful since the visualization logic can be slow.
+ """
+ self.metadata = MetadataCatalog.get(
+ cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
+ )
+ self.cpu_device = torch.device("cpu")
+ self.instance_mode = instance_mode
+
+ self.parallel = parallel
+ if parallel:
+ num_gpu = torch.cuda.device_count()
+ self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
+ else:
+ self.predictor = DefaultPredictor(cfg)
+
+ def run_on_image(self, image):
+ """
+ Args:
+ image (np.ndarray): an image of shape (H, W, C) (in BGR order).
+ This is the format used by OpenCV.
+
+ Returns:
+ predictions (dict): the output of the model.
+ vis_output (VisImage): the visualized image output.
+ """
+ vis_output = None
+ predictions = self.predictor(image)
+ # Convert image from OpenCV BGR format to Matplotlib RGB format.
+ image = image[:, :, ::-1]
+ visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
+ if "panoptic_seg" in predictions:
+ panoptic_seg, segments_info = predictions["panoptic_seg"]
+ vis_output = visualizer.draw_panoptic_seg_predictions(
+ panoptic_seg.to(self.cpu_device), segments_info
+ )
+ else:
+ if "sem_seg" in predictions:
+ vis_output = visualizer.draw_sem_seg(
+ predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
+ )
+ if "instances" in predictions:
+ instances = predictions["instances"].to(self.cpu_device)
+ vis_output = visualizer.draw_instance_predictions(predictions=instances)
+
+ return predictions, vis_output
+
+ def _frame_from_video(self, video):
+ while video.isOpened():
+ success, frame = video.read()
+ if success:
+ yield frame
+ else:
+ break
+
+ def run_on_video(self, video):
+ """
+ Visualizes predictions on frames of the input video.
+
+ Args:
+ video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
+ either a webcam or a video file.
+
+ Yields:
+ ndarray: BGR visualizations of each video frame.
+ """
+ video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
+
+ def process_predictions(frame, predictions):
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+ if "panoptic_seg" in predictions:
+ panoptic_seg, segments_info = predictions["panoptic_seg"]
+ vis_frame = video_visualizer.draw_panoptic_seg_predictions(
+ frame, panoptic_seg.to(self.cpu_device), segments_info
+ )
+ elif "instances" in predictions:
+ predictions = predictions["instances"].to(self.cpu_device)
+ vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
+ elif "sem_seg" in predictions:
+ vis_frame = video_visualizer.draw_sem_seg(
+ frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
+ )
+
+ # Converts Matplotlib RGB format to OpenCV BGR format
+ vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
+ return vis_frame
+
+ frame_gen = self._frame_from_video(video)
+ if self.parallel:
+ buffer_size = self.predictor.default_buffer_size
+
+ frame_data = deque()
+
+ for cnt, frame in enumerate(frame_gen):
+ frame_data.append(frame)
+ self.predictor.put(frame)
+
+ if cnt >= buffer_size:
+ frame = frame_data.popleft()
+ predictions = self.predictor.get()
+ yield process_predictions(frame, predictions)
+
+ while len(frame_data):
+ frame = frame_data.popleft()
+ predictions = self.predictor.get()
+ yield process_predictions(frame, predictions)
+ else:
+ for frame in frame_gen:
+ yield process_predictions(frame, self.predictor(frame))
+
+
+class AsyncPredictor:
+ """
+ A predictor that runs the model asynchronously, possibly on >1 GPUs.
+ Because rendering the visualization takes considerably amount of time,
+ this helps improve throughput a little bit when rendering videos.
+ """
+
+ class _StopToken:
+ pass
+
+ class _PredictWorker(mp.Process):
+ def __init__(self, cfg, task_queue, result_queue):
+ self.cfg = cfg
+ self.task_queue = task_queue
+ self.result_queue = result_queue
+ super().__init__()
+
+ def run(self):
+ predictor = DefaultPredictor(self.cfg)
+
+ while True:
+ task = self.task_queue.get()
+ if isinstance(task, AsyncPredictor._StopToken):
+ break
+ idx, data = task
+ result = predictor(data)
+ self.result_queue.put((idx, result))
+
+ def __init__(self, cfg, num_gpus: int = 1):
+ """
+ Args:
+ cfg (CfgNode):
+ num_gpus (int): if 0, will run on CPU
+ """
+ num_workers = max(num_gpus, 1)
+ self.task_queue = mp.Queue(maxsize=num_workers * 3)
+ self.result_queue = mp.Queue(maxsize=num_workers * 3)
+ self.procs = []
+ for gpuid in range(max(num_gpus, 1)):
+ cfg = cfg.clone()
+ cfg.defrost()
+ cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
+ self.procs.append(
+ AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
+ )
+
+ self.put_idx = 0
+ self.get_idx = 0
+ self.result_rank = []
+ self.result_data = []
+
+ for p in self.procs:
+ p.start()
+ atexit.register(self.shutdown)
+
+ def put(self, image):
+ self.put_idx += 1
+ self.task_queue.put((self.put_idx, image))
+
+ def get(self):
+ self.get_idx += 1 # the index needed for this request
+ if len(self.result_rank) and self.result_rank[0] == self.get_idx:
+ res = self.result_data[0]
+ del self.result_data[0], self.result_rank[0]
+ return res
+
+ while True:
+ # make sure the results are returned in the correct order
+ idx, res = self.result_queue.get()
+ if idx == self.get_idx:
+ return res
+ insert = bisect.bisect(self.result_rank, idx)
+ self.result_rank.insert(insert, idx)
+ self.result_data.insert(insert, res)
+
+ def __len__(self):
+ return self.put_idx - self.get_idx
+
+ def __call__(self, image):
+ self.put(image)
+ return self.get()
+
+ def shutdown(self):
+ for _ in self.procs:
+ self.task_queue.put(AsyncPredictor._StopToken())
+
+ @property
+ def default_buffer_size(self):
+ return len(self.procs) * 5
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdd994b49294485c27610772f97f177741f5518f
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .utils.env import setup_environment
+
+setup_environment()
+
+
+# This line will be programatically read/write by setup.py.
+# Leave them at the bottom of this file and don't touch them.
+__version__ = "0.6"
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..99da0469ae7e169d8970e4b642fed3f870076860
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+# File:
+
+
+from . import catalog as _UNUSED # register the handler
+from .detection_checkpoint import DetectionCheckpointer
+from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
+
+__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/c2_model_loading.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/c2_model_loading.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c8d181bd7200bd3fd38446e743f8f16780d6e76
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/c2_model_loading.py
@@ -0,0 +1,407 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import re
+from typing import Dict, List
+import torch
+from tabulate import tabulate
+
+
+def convert_basic_c2_names(original_keys):
+ """
+ Apply some basic name conversion to names in C2 weights.
+ It only deals with typical backbone models.
+
+ Args:
+ original_keys (list[str]):
+ Returns:
+ list[str]: The same number of strings matching those in original_keys.
+ """
+ layer_keys = copy.deepcopy(original_keys)
+ layer_keys = [
+ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
+ ] # some hard-coded mappings
+
+ layer_keys = [k.replace("_", ".") for k in layer_keys]
+ layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
+ layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
+ # Uniform both bn and gn names to "norm"
+ layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
+ layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
+ layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
+
+ # stem
+ layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
+ # to avoid mis-matching with "conv1" in other components (e.g. detection head)
+ layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
+
+ # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
+ # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
+ # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
+ # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
+ # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
+
+ # blocks
+ layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
+ layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
+ layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
+ layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
+
+ # DensePose substitutions
+ layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
+ layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
+ layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
+ layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
+ layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
+ return layer_keys
+
+
+def convert_c2_detectron_names(weights):
+ """
+ Map Caffe2 Detectron weight names to Detectron2 names.
+
+ Args:
+ weights (dict): name -> tensor
+
+ Returns:
+ dict: detectron2 names -> tensor
+ dict: detectron2 names -> C2 names
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Renaming Caffe2 weights ......")
+ original_keys = sorted(weights.keys())
+ layer_keys = copy.deepcopy(original_keys)
+
+ layer_keys = convert_basic_c2_names(layer_keys)
+
+ # --------------------------------------------------------------------------
+ # RPN hidden representation conv
+ # --------------------------------------------------------------------------
+ # FPN case
+ # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
+ # shared for all other levels, hence the appearance of "fpn2"
+ layer_keys = [
+ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
+ ]
+ # Non-FPN case
+ layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # RPN box transformation conv
+ # --------------------------------------------------------------------------
+ # FPN case (see note above about "fpn2")
+ layer_keys = [
+ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
+ for k in layer_keys
+ ]
+ layer_keys = [
+ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
+ for k in layer_keys
+ ]
+ # Non-FPN case
+ layer_keys = [
+ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
+ ]
+ layer_keys = [
+ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
+ for k in layer_keys
+ ]
+
+ # --------------------------------------------------------------------------
+ # Fast R-CNN box head
+ # --------------------------------------------------------------------------
+ layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
+ layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
+ layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
+ layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
+ # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
+ layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # FPN lateral and output convolutions
+ # --------------------------------------------------------------------------
+ def fpn_map(name):
+ """
+ Look for keys with the following patterns:
+ 1) Starts with "fpn.inner."
+ Example: "fpn.inner.res2.2.sum.lateral.weight"
+ Meaning: These are lateral pathway convolutions
+ 2) Starts with "fpn.res"
+ Example: "fpn.res2.2.sum.weight"
+ Meaning: These are FPN output convolutions
+ """
+ splits = name.split(".")
+ norm = ".norm" if "norm" in splits else ""
+ if name.startswith("fpn.inner."):
+ # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
+ stage = int(splits[2][len("res") :])
+ return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
+ elif name.startswith("fpn.res"):
+ # splits example: ['fpn', 'res2', '2', 'sum', 'weight']
+ stage = int(splits[1][len("res") :])
+ return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
+ return name
+
+ layer_keys = [fpn_map(k) for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # Mask R-CNN mask head
+ # --------------------------------------------------------------------------
+ # roi_heads.StandardROIHeads case
+ layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
+ layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
+ layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
+ # roi_heads.Res5ROIHeads case
+ layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # Keypoint R-CNN head
+ # --------------------------------------------------------------------------
+ # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
+ layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
+ layer_keys = [
+ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
+ ]
+ layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # Done with replacements
+ # --------------------------------------------------------------------------
+ assert len(set(layer_keys)) == len(layer_keys)
+ assert len(original_keys) == len(layer_keys)
+
+ new_weights = {}
+ new_keys_to_original_keys = {}
+ for orig, renamed in zip(original_keys, layer_keys):
+ new_keys_to_original_keys[renamed] = orig
+ if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
+ # remove the meaningless prediction weight for background class
+ new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
+ new_weights[renamed] = weights[orig][new_start_idx:]
+ logger.info(
+ "Remove prediction weight for background class in {}. The shape changes from "
+ "{} to {}.".format(
+ renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
+ )
+ )
+ elif renamed.startswith("cls_score."):
+ # move weights of bg class from original index 0 to last index
+ logger.info(
+ "Move classification weights for background class in {} from index 0 to "
+ "index {}.".format(renamed, weights[orig].shape[0] - 1)
+ )
+ new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
+ else:
+ new_weights[renamed] = weights[orig]
+
+ return new_weights, new_keys_to_original_keys
+
+
+# Note the current matching is not symmetric.
+# it assumes model_state_dict will have longer names.
+def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
+ """
+ Match names between the two state-dict, and returns a new chkpt_state_dict with names
+ converted to match model_state_dict with heuristics. The returned dict can be later
+ loaded with fvcore checkpointer.
+ If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
+ model and will be renamed at first.
+
+ Strategy: suppose that the models that we will create will have prefixes appended
+ to each of its keys, for example due to an extra level of nesting that the original
+ pre-trained weights from ImageNet won't contain. For example, model.state_dict()
+ might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
+ res2.conv1.weight. We thus want to match both parameters together.
+ For that, we look for each model weight, look among all loaded keys if there is one
+ that is a suffix of the current weight name, and use it if that's the case.
+ If multiple matches exist, take the one with longest size
+ of the corresponding name. For example, for the same model as before, the pretrained
+ weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
+ we want to match backbone[0].body.conv1.weight to conv1.weight, and
+ backbone[0].body.res2.conv1.weight to res2.conv1.weight.
+ """
+ model_keys = sorted(model_state_dict.keys())
+ if c2_conversion:
+ ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
+ # original_keys: the name in the original dict (before renaming)
+ else:
+ original_keys = {x: x for x in ckpt_state_dict.keys()}
+ ckpt_keys = sorted(ckpt_state_dict.keys())
+
+ def match(a, b):
+ # Matched ckpt_key should be a complete (starts with '.') suffix.
+ # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
+ # but matches whatever_conv1 or mesh_head.whatever_conv1.
+ return a == b or a.endswith("." + b)
+
+ # get a matrix of string matches, where each (i, j) entry correspond to the size of the
+ # ckpt_key string, if it matches
+ match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
+ match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
+ # use the matched one with longest size in case of multiple matches
+ max_match_size, idxs = match_matrix.max(1)
+ # remove indices that correspond to no-match
+ idxs[max_match_size == 0] = -1
+
+ logger = logging.getLogger(__name__)
+ # matched_pairs (matched checkpoint key --> matched model key)
+ matched_keys = {}
+ result_state_dict = {}
+ for idx_model, idx_ckpt in enumerate(idxs.tolist()):
+ if idx_ckpt == -1:
+ continue
+ key_model = model_keys[idx_model]
+ key_ckpt = ckpt_keys[idx_ckpt]
+ value_ckpt = ckpt_state_dict[key_ckpt]
+ shape_in_model = model_state_dict[key_model].shape
+
+ if shape_in_model != value_ckpt.shape:
+ logger.warning(
+ "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
+ key_ckpt, value_ckpt.shape, key_model, shape_in_model
+ )
+ )
+ logger.warning(
+ "{} will not be loaded. Please double check and see if this is desired.".format(
+ key_ckpt
+ )
+ )
+ continue
+
+ assert key_model not in result_state_dict
+ result_state_dict[key_model] = value_ckpt
+ if key_ckpt in matched_keys: # already added to matched_keys
+ logger.error(
+ "Ambiguity found for {} in checkpoint!"
+ "It matches at least two keys in the model ({} and {}).".format(
+ key_ckpt, key_model, matched_keys[key_ckpt]
+ )
+ )
+ raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
+
+ matched_keys[key_ckpt] = key_model
+
+ # logging:
+ matched_model_keys = sorted(matched_keys.values())
+ if len(matched_model_keys) == 0:
+ logger.warning("No weights in checkpoint matched with model.")
+ return ckpt_state_dict
+ common_prefix = _longest_common_prefix(matched_model_keys)
+ rev_matched_keys = {v: k for k, v in matched_keys.items()}
+ original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
+
+ model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
+ table = []
+ memo = set()
+ for key_model in matched_model_keys:
+ if key_model in memo:
+ continue
+ if key_model in model_key_groups:
+ group = model_key_groups[key_model]
+ memo |= set(group)
+ shapes = [tuple(model_state_dict[k].shape) for k in group]
+ table.append(
+ (
+ _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
+ _group_str([original_keys[k] for k in group]),
+ " ".join([str(x).replace(" ", "") for x in shapes]),
+ )
+ )
+ else:
+ key_checkpoint = original_keys[key_model]
+ shape = str(tuple(model_state_dict[key_model].shape))
+ table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
+ table_str = tabulate(
+ table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
+ )
+ logger.info(
+ "Following weights matched with "
+ + (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+ + ":\n"
+ + table_str
+ )
+
+ unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
+ for k in unmatched_ckpt_keys:
+ result_state_dict[k] = ckpt_state_dict[k]
+ return result_state_dict
+
+
+def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
+ """
+ Params in the same submodule are grouped together.
+
+ Args:
+ keys: names of all parameters
+ original_names: mapping from parameter name to their name in the checkpoint
+
+ Returns:
+ dict[name -> all other names in the same group]
+ """
+
+ def _submodule_name(key):
+ pos = key.rfind(".")
+ if pos < 0:
+ return None
+ prefix = key[: pos + 1]
+ return prefix
+
+ all_submodules = [_submodule_name(k) for k in keys]
+ all_submodules = [x for x in all_submodules if x]
+ all_submodules = sorted(all_submodules, key=len)
+
+ ret = {}
+ for prefix in all_submodules:
+ group = [k for k in keys if k.startswith(prefix)]
+ if len(group) <= 1:
+ continue
+ original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
+ if len(original_name_lcp) == 0:
+ # don't group weights if original names don't share prefix
+ continue
+
+ for k in group:
+ if k in ret:
+ continue
+ ret[k] = group
+ return ret
+
+
+def _longest_common_prefix(names: List[str]) -> str:
+ """
+ ["abc.zfg", "abc.zef"] -> "abc."
+ """
+ names = [n.split(".") for n in names]
+ m1, m2 = min(names), max(names)
+ ret = [a for a, b in zip(m1, m2) if a == b]
+ ret = ".".join(ret) + "." if len(ret) else ""
+ return ret
+
+
+def _longest_common_prefix_str(names: List[str]) -> str:
+ m1, m2 = min(names), max(names)
+ lcp = [a for a, b in zip(m1, m2) if a == b]
+ lcp = "".join(lcp)
+ return lcp
+
+
+def _group_str(names: List[str]) -> str:
+ """
+ Turn "common1", "common2", "common3" into "common{1,2,3}"
+ """
+ lcp = _longest_common_prefix_str(names)
+ rest = [x[len(lcp) :] for x in names]
+ rest = "{" + ",".join(rest) + "}"
+ ret = lcp + rest
+
+ # add some simplification for BN specifically
+ ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
+ ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
+ return ret
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/catalog.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a85736754a0de4550df96c22f38fc515bd02d71
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/catalog.py
@@ -0,0 +1,115 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+
+from detectron2.utils.file_io import PathHandler, PathManager
+
+
+class ModelCatalog(object):
+ """
+ Store mappings from names to third-party models.
+ """
+
+ S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
+
+ # MSRA models have STRIDE_IN_1X1=True. False otherwise.
+ # NOTE: all BN models here have fused BN into an affine layer.
+ # As a result, you should only load them to a model with "FrozenBN".
+ # Loading them to a model with regular BN or SyncBN is wrong.
+ # Even when loaded to FrozenBN, it is still different from affine by an epsilon,
+ # which should be negligible for training.
+ # NOTE: all models here uses PIXEL_STD=[1,1,1]
+ # NOTE: Most of the BN models here are no longer used. We use the
+ # re-converted pre-trained models under detectron2 model zoo instead.
+ C2_IMAGENET_MODELS = {
+ "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
+ "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
+ "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
+ "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
+ "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
+ "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
+ "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
+ }
+
+ C2_DETECTRON_PATH_FORMAT = (
+ "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950
+ )
+
+ C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
+ C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
+
+ # format: {model_name} -> part of the url
+ C2_DETECTRON_MODELS = {
+ "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950
+ "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950
+ "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950
+ "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950
+ "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950
+ "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950
+ "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950
+ "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950
+ "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950
+ "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950
+ "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950
+ "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950
+ "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950
+ }
+
+ @staticmethod
+ def get(name):
+ if name.startswith("Caffe2Detectron/COCO"):
+ return ModelCatalog._get_c2_detectron_baseline(name)
+ if name.startswith("ImageNetPretrained/"):
+ return ModelCatalog._get_c2_imagenet_pretrained(name)
+ raise RuntimeError("model not present in the catalog: {}".format(name))
+
+ @staticmethod
+ def _get_c2_imagenet_pretrained(name):
+ prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
+ name = name[len("ImageNetPretrained/") :]
+ name = ModelCatalog.C2_IMAGENET_MODELS[name]
+ url = "/".join([prefix, name])
+ return url
+
+ @staticmethod
+ def _get_c2_detectron_baseline(name):
+ name = name[len("Caffe2Detectron/COCO/") :]
+ url = ModelCatalog.C2_DETECTRON_MODELS[name]
+ if "keypoint_rcnn" in name:
+ dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
+ else:
+ dataset = ModelCatalog.C2_DATASET_COCO
+
+ if "35998355/rpn_R-50-C4_1x" in name:
+ # this one model is somehow different from others ..
+ type = "rpn"
+ else:
+ type = "generalized_rcnn"
+
+ # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
+ url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
+ prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
+ )
+ return url
+
+
+class ModelCatalogHandler(PathHandler):
+ """
+ Resolve URL like catalog://.
+ """
+
+ PREFIX = "catalog://"
+
+ def _get_supported_prefixes(self):
+ return [self.PREFIX]
+
+ def _get_local_path(self, path, **kwargs):
+ logger = logging.getLogger(__name__)
+ catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
+ logger.info("Catalog entry {} points to {}".format(path, catalog_path))
+ return PathManager.get_local_path(catalog_path, **kwargs)
+
+ def _open(self, path, mode="r", **kwargs):
+ return PathManager.open(self._get_local_path(path), mode, **kwargs)
+
+
+PathManager.register_handler(ModelCatalogHandler())
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/detection_checkpoint.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/detection_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..82fd3b2d40054573917a445b138d29a6dabfb907
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/checkpoint/detection_checkpoint.py
@@ -0,0 +1,120 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import os
+import pickle
+import torch
+from fvcore.common.checkpoint import Checkpointer
+from torch.nn.parallel import DistributedDataParallel
+
+import detectron2.utils.comm as comm
+from detectron2.utils.file_io import PathManager
+
+from .c2_model_loading import align_and_update_state_dicts
+
+
+class DetectionCheckpointer(Checkpointer):
+ """
+ Same as :class:`Checkpointer`, but is able to:
+ 1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
+ 2. correctly load checkpoints that are only available on the master worker
+ """
+
+ def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
+ is_main_process = comm.is_main_process()
+ super().__init__(
+ model,
+ save_dir,
+ save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
+ **checkpointables,
+ )
+ self.path_manager = PathManager
+
+ def load(self, path, *args, **kwargs):
+ need_sync = False
+
+ if path and isinstance(self.model, DistributedDataParallel):
+ logger = logging.getLogger(__name__)
+ path = self.path_manager.get_local_path(path)
+ has_file = os.path.isfile(path)
+ all_has_file = comm.all_gather(has_file)
+ if not all_has_file[0]:
+ raise OSError(f"File {path} not found on main worker.")
+ if not all(all_has_file):
+ logger.warning(
+ f"Not all workers can read checkpoint {path}. "
+ "Training may fail to fully resume."
+ )
+ # TODO: broadcast the checkpoint file contents from main
+ # worker, and load from it instead.
+ need_sync = True
+ if not has_file:
+ path = None # don't load if not readable
+ ret = super().load(path, *args, **kwargs)
+
+ if need_sync:
+ logger.info("Broadcasting model states from main worker ...")
+ self.model._sync_params_and_buffers()
+ return ret
+
+ def _load_file(self, filename):
+ if filename.endswith(".pkl"):
+ with PathManager.open(filename, "rb") as f:
+ data = pickle.load(f, encoding="latin1")
+ if "model" in data and "__author__" in data:
+ # file is in Detectron2 model zoo format
+ self.logger.info("Reading a file from '{}'".format(data["__author__"]))
+ return data
+ else:
+ # assume file is from Caffe2 / Detectron1 model zoo
+ if "blobs" in data:
+ # Detection models have "blobs", but ImageNet models don't
+ data = data["blobs"]
+ data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
+ return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
+ elif filename.endswith(".pyth"):
+ # assume file is from pycls; no one else seems to use the ".pyth" extension
+ with PathManager.open(filename, "rb") as f:
+ data = torch.load(f)
+ assert (
+ "model_state" in data
+ ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
+ model_state = {
+ k: v
+ for k, v in data["model_state"].items()
+ if not k.endswith("num_batches_tracked")
+ }
+ return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
+
+ loaded = super()._load_file(filename) # load native pth checkpoint
+ if "model" not in loaded:
+ loaded = {"model": loaded}
+ return loaded
+
+ def _load_model(self, checkpoint):
+ if checkpoint.get("matching_heuristics", False):
+ self._convert_ndarray_to_tensor(checkpoint["model"])
+ # convert weights by name-matching heuristics
+ checkpoint["model"] = align_and_update_state_dicts(
+ self.model.state_dict(),
+ checkpoint["model"],
+ c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
+ )
+ # for non-caffe2 models, use standard ways to load it
+ incompatible = super()._load_model(checkpoint)
+
+ model_buffers = dict(self.model.named_buffers(recurse=False))
+ for k in ["pixel_mean", "pixel_std"]:
+ # Ignore missing key message about pixel_mean/std.
+ # Though they may be missing in old checkpoints, they will be correctly
+ # initialized from config anyway.
+ if k in model_buffers:
+ try:
+ incompatible.missing_keys.remove(k)
+ except ValueError:
+ pass
+ for k in incompatible.unexpected_keys[:]:
+ # Ignore unexpected keys about cell anchors. They exist in old checkpoints
+ # but now they are non-persistent buffers and will not be in new checkpoints.
+ if "anchor_generator.cell_anchors" in k:
+ incompatible.unexpected_keys.remove(k)
+ return incompatible
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/config/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e648e632d55c70f160d49630378d202fbde4e45
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .compat import downgrade_config, upgrade_config
+from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable
+from .instantiate import instantiate
+from .lazy import LazyCall, LazyConfig
+
+__all__ = [
+ "CfgNode",
+ "get_cfg",
+ "global_cfg",
+ "set_global_cfg",
+ "downgrade_config",
+ "upgrade_config",
+ "configurable",
+ "instantiate",
+ "LazyCall",
+ "LazyConfig",
+]
+
+
+from detectron2.utils.env import fixup_module_metadata
+
+fixup_module_metadata(__name__, globals(), __all__)
+del fixup_module_metadata
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/config/compat.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..11a08c439bf14defd880e37a938fab8a08e68eeb
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/compat.py
@@ -0,0 +1,229 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""
+Backward compatibility of configs.
+
+Instructions to bump version:
++ It's not needed to bump version if new keys are added.
+ It's only needed when backward-incompatible changes happen
+ (i.e., some existing keys disappear, or the meaning of a key changes)
++ To bump version, do the following:
+ 1. Increment _C.VERSION in defaults.py
+ 2. Add a converter in this file.
+
+ Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X,
+ and a function "downgrade" which in-place downgrades config from X to X-1
+
+ In each function, VERSION is left unchanged.
+
+ Each converter assumes that its input has the relevant keys
+ (i.e., the input is not a partial config).
+ 3. Run the tests (test_config.py) to make sure the upgrade & downgrade
+ functions are consistent.
+"""
+
+import logging
+from typing import List, Optional, Tuple
+
+from .config import CfgNode as CN
+from .defaults import _C
+
+__all__ = ["upgrade_config", "downgrade_config"]
+
+
+def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
+ """
+ Upgrade a config from its current version to a newer version.
+
+ Args:
+ cfg (CfgNode):
+ to_version (int): defaults to the latest version.
+ """
+ cfg = cfg.clone()
+ if to_version is None:
+ to_version = _C.VERSION
+
+ assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
+ cfg.VERSION, to_version
+ )
+ for k in range(cfg.VERSION, to_version):
+ converter = globals()["ConverterV" + str(k + 1)]
+ converter.upgrade(cfg)
+ cfg.VERSION = k + 1
+ return cfg
+
+
+def downgrade_config(cfg: CN, to_version: int) -> CN:
+ """
+ Downgrade a config from its current version to an older version.
+
+ Args:
+ cfg (CfgNode):
+ to_version (int):
+
+ Note:
+ A general downgrade of arbitrary configs is not always possible due to the
+ different functionalities in different versions.
+ The purpose of downgrade is only to recover the defaults in old versions,
+ allowing it to load an old partial yaml config.
+ Therefore, the implementation only needs to fill in the default values
+ in the old version when a general downgrade is not possible.
+ """
+ cfg = cfg.clone()
+ assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format(
+ cfg.VERSION, to_version
+ )
+ for k in range(cfg.VERSION, to_version, -1):
+ converter = globals()["ConverterV" + str(k)]
+ converter.downgrade(cfg)
+ cfg.VERSION = k - 1
+ return cfg
+
+
+def guess_version(cfg: CN, filename: str) -> int:
+ """
+ Guess the version of a partial config where the VERSION field is not specified.
+ Returns the version, or the latest if cannot make a guess.
+
+ This makes it easier for users to migrate.
+ """
+ logger = logging.getLogger(__name__)
+
+ def _has(name: str) -> bool:
+ cur = cfg
+ for n in name.split("."):
+ if n not in cur:
+ return False
+ cur = cur[n]
+ return True
+
+ # Most users' partial configs have "MODEL.WEIGHT", so guess on it
+ ret = None
+ if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"):
+ ret = 1
+
+ if ret is not None:
+ logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
+ else:
+ ret = _C.VERSION
+ logger.warning(
+ "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(
+ filename, ret
+ )
+ )
+ return ret
+
+
+def _rename(cfg: CN, old: str, new: str) -> None:
+ old_keys = old.split(".")
+ new_keys = new.split(".")
+
+ def _set(key_seq: List[str], val: str) -> None:
+ cur = cfg
+ for k in key_seq[:-1]:
+ if k not in cur:
+ cur[k] = CN()
+ cur = cur[k]
+ cur[key_seq[-1]] = val
+
+ def _get(key_seq: List[str]) -> CN:
+ cur = cfg
+ for k in key_seq:
+ cur = cur[k]
+ return cur
+
+ def _del(key_seq: List[str]) -> None:
+ cur = cfg
+ for k in key_seq[:-1]:
+ cur = cur[k]
+ del cur[key_seq[-1]]
+ if len(cur) == 0 and len(key_seq) > 1:
+ _del(key_seq[:-1])
+
+ _set(new_keys, _get(old_keys))
+ _del(old_keys)
+
+
+class _RenameConverter:
+ """
+ A converter that handles simple rename.
+ """
+
+ RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name)
+
+ @classmethod
+ def upgrade(cls, cfg: CN) -> None:
+ for old, new in cls.RENAME:
+ _rename(cfg, old, new)
+
+ @classmethod
+ def downgrade(cls, cfg: CN) -> None:
+ for old, new in cls.RENAME[::-1]:
+ _rename(cfg, new, old)
+
+
+class ConverterV1(_RenameConverter):
+ RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")]
+
+
+class ConverterV2(_RenameConverter):
+ """
+ A large bulk of rename, before public release.
+ """
+
+ RENAME = [
+ ("MODEL.WEIGHT", "MODEL.WEIGHTS"),
+ ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"),
+ ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"),
+ ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"),
+ ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"),
+ (
+ "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD",
+ "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH",
+ ),
+ (
+ "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT",
+ "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT",
+ ),
+ (
+ "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD",
+ "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH",
+ ),
+ ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"),
+ ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"),
+ ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"),
+ ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"),
+ ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"),
+ ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"),
+ ("TEST.AUG_ON", "TEST.AUG.ENABLED"),
+ ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"),
+ ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"),
+ ("TEST.AUG_FLIP", "TEST.AUG.FLIP"),
+ ]
+
+ @classmethod
+ def upgrade(cls, cfg: CN) -> None:
+ super().upgrade(cfg)
+
+ if cfg.MODEL.META_ARCHITECTURE == "RetinaNet":
+ _rename(
+ cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS"
+ )
+ _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
+ del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"]
+ del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"]
+ else:
+ _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS")
+ _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"]
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"]
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"]
+
+ @classmethod
+ def downgrade(cls, cfg: CN) -> None:
+ super().downgrade(cfg)
+
+ _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS")
+ _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES")
+ cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
+ cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
+ cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/config/config.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..49a55b1bc87509e2bb24b902ae12c21d5aaeda81
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/config.py
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import functools
+import inspect
+import logging
+from fvcore.common.config import CfgNode as _CfgNode
+
+from detectron2.utils.file_io import PathManager
+
+
+class CfgNode(_CfgNode):
+ """
+ The same as `fvcore.common.config.CfgNode`, but different in:
+
+ 1. Use unsafe yaml loading by default.
+ Note that this may lead to arbitrary code execution: you must not
+ load a config file from untrusted sources before manually inspecting
+ the content of the file.
+ 2. Support config versioning.
+ When attempting to merge an old config, it will convert the old config automatically.
+
+ .. automethod:: clone
+ .. automethod:: freeze
+ .. automethod:: defrost
+ .. automethod:: is_frozen
+ .. automethod:: load_yaml_with_base
+ .. automethod:: merge_from_list
+ .. automethod:: merge_from_other_cfg
+ """
+
+ @classmethod
+ def _open_cfg(cls, filename):
+ return PathManager.open(filename, "r")
+
+ # Note that the default value of allow_unsafe is changed to True
+ def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
+ """
+ Load content from the given config file and merge it into self.
+
+ Args:
+ cfg_filename: config filename
+ allow_unsafe: allow unsafe yaml syntax
+ """
+ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
+ loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
+ loaded_cfg = type(self)(loaded_cfg)
+
+ # defaults.py needs to import CfgNode
+ from .defaults import _C
+
+ latest_ver = _C.VERSION
+ assert (
+ latest_ver == self.VERSION
+ ), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
+
+ logger = logging.getLogger(__name__)
+
+ loaded_ver = loaded_cfg.get("VERSION", None)
+ if loaded_ver is None:
+ from .compat import guess_version
+
+ loaded_ver = guess_version(loaded_cfg, cfg_filename)
+ assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
+ loaded_ver, self.VERSION
+ )
+
+ if loaded_ver == self.VERSION:
+ self.merge_from_other_cfg(loaded_cfg)
+ else:
+ # compat.py needs to import CfgNode
+ from .compat import upgrade_config, downgrade_config
+
+ logger.warning(
+ "Loading an old v{} config file '{}' by automatically upgrading to v{}. "
+ "See docs/CHANGELOG.md for instructions to update your files.".format(
+ loaded_ver, cfg_filename, self.VERSION
+ )
+ )
+ # To convert, first obtain a full config at an old version
+ old_self = downgrade_config(self, to_version=loaded_ver)
+ old_self.merge_from_other_cfg(loaded_cfg)
+ new_config = upgrade_config(old_self)
+ self.clear()
+ self.update(new_config)
+
+ def dump(self, *args, **kwargs):
+ """
+ Returns:
+ str: a yaml string representation of the config
+ """
+ # to make it show up in docs
+ return super().dump(*args, **kwargs)
+
+
+global_cfg = CfgNode()
+
+
+def get_cfg() -> CfgNode:
+ """
+ Get a copy of the default config.
+
+ Returns:
+ a detectron2 CfgNode instance.
+ """
+ from .defaults import _C
+
+ return _C.clone()
+
+
+def set_global_cfg(cfg: CfgNode) -> None:
+ """
+ Let the global config point to the given cfg.
+
+ Assume that the given "cfg" has the key "KEY", after calling
+ `set_global_cfg(cfg)`, the key can be accessed by:
+ ::
+ from detectron2.config import global_cfg
+ print(global_cfg.KEY)
+
+ By using a hacky global config, you can access these configs anywhere,
+ without having to pass the config object or the values deep into the code.
+ This is a hacky feature introduced for quick prototyping / research exploration.
+ """
+ global global_cfg
+ global_cfg.clear()
+ global_cfg.update(cfg)
+
+
+def configurable(init_func=None, *, from_config=None):
+ """
+ Decorate a function or a class's __init__ method so that it can be called
+ with a :class:`CfgNode` object using a :func:`from_config` function that translates
+ :class:`CfgNode` to arguments.
+
+ Examples:
+ ::
+ # Usage 1: Decorator on __init__:
+ class A:
+ @configurable
+ def __init__(self, a, b=2, c=3):
+ pass
+
+ @classmethod
+ def from_config(cls, cfg): # 'cfg' must be the first argument
+ # Returns kwargs to be passed to __init__
+ return {"a": cfg.A, "b": cfg.B}
+
+ a1 = A(a=1, b=2) # regular construction
+ a2 = A(cfg) # construct with a cfg
+ a3 = A(cfg, b=3, c=4) # construct with extra overwrite
+
+ # Usage 2: Decorator on any function. Needs an extra from_config argument:
+ @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B})
+ def a_func(a, b=2, c=3):
+ pass
+
+ a1 = a_func(a=1, b=2) # regular call
+ a2 = a_func(cfg) # call with a cfg
+ a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
+
+ Args:
+ init_func (callable): a class's ``__init__`` method in usage 1. The
+ class must have a ``from_config`` classmethod which takes `cfg` as
+ the first argument.
+ from_config (callable): the from_config function in usage 2. It must take `cfg`
+ as its first argument.
+ """
+
+ if init_func is not None:
+ assert (
+ inspect.isfunction(init_func)
+ and from_config is None
+ and init_func.__name__ == "__init__"
+ ), "Incorrect use of @configurable. Check API documentation for examples."
+
+ @functools.wraps(init_func)
+ def wrapped(self, *args, **kwargs):
+ try:
+ from_config_func = type(self).from_config
+ except AttributeError as e:
+ raise AttributeError(
+ "Class with @configurable must have a 'from_config' classmethod."
+ ) from e
+ if not inspect.ismethod(from_config_func):
+ raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
+
+ if _called_with_cfg(*args, **kwargs):
+ explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
+ init_func(self, **explicit_args)
+ else:
+ init_func(self, *args, **kwargs)
+
+ return wrapped
+
+ else:
+ if from_config is None:
+ return configurable # @configurable() is made equivalent to @configurable
+ assert inspect.isfunction(
+ from_config
+ ), "from_config argument of configurable must be a function!"
+
+ def wrapper(orig_func):
+ @functools.wraps(orig_func)
+ def wrapped(*args, **kwargs):
+ if _called_with_cfg(*args, **kwargs):
+ explicit_args = _get_args_from_config(from_config, *args, **kwargs)
+ return orig_func(**explicit_args)
+ else:
+ return orig_func(*args, **kwargs)
+
+ wrapped.from_config = from_config
+ return wrapped
+
+ return wrapper
+
+
+def _get_args_from_config(from_config_func, *args, **kwargs):
+ """
+ Use `from_config` to obtain explicit arguments.
+
+ Returns:
+ dict: arguments to be used for cls.__init__
+ """
+ signature = inspect.signature(from_config_func)
+ if list(signature.parameters.keys())[0] != "cfg":
+ if inspect.isfunction(from_config_func):
+ name = from_config_func.__name__
+ else:
+ name = f"{from_config_func.__self__}.from_config"
+ raise TypeError(f"{name} must take 'cfg' as the first argument!")
+ support_var_arg = any(
+ param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
+ for param in signature.parameters.values()
+ )
+ if support_var_arg: # forward all arguments to from_config, if from_config accepts them
+ ret = from_config_func(*args, **kwargs)
+ else:
+ # forward supported arguments to from_config
+ supported_arg_names = set(signature.parameters.keys())
+ extra_kwargs = {}
+ for name in list(kwargs.keys()):
+ if name not in supported_arg_names:
+ extra_kwargs[name] = kwargs.pop(name)
+ ret = from_config_func(*args, **kwargs)
+ # forward the other arguments to __init__
+ ret.update(extra_kwargs)
+ return ret
+
+
+def _called_with_cfg(*args, **kwargs):
+ """
+ Returns:
+ bool: whether the arguments contain CfgNode and should be considered
+ forwarded to from_config.
+ """
+ from omegaconf import DictConfig
+
+ if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
+ return True
+ if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
+ return True
+ # `from_config`'s first argument is forced to be "cfg".
+ # So the above check covers all cases.
+ return False
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/config/defaults.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/defaults.py
new file mode 100644
index 0000000000000000000000000000000000000000..848486dfe91a62559e6ae35120a4dac26d4bd66d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/defaults.py
@@ -0,0 +1,635 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .config import CfgNode as CN
+
+# NOTE: given the new config system
+# (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html),
+# we will stop adding new functionalities to default CfgNode.
+
+# -----------------------------------------------------------------------------
+# Convention about Training / Test specific parameters
+# -----------------------------------------------------------------------------
+# Whenever an argument can be either used for training or for testing, the
+# corresponding name will be post-fixed by a _TRAIN for a training parameter,
+# or _TEST for a test-specific parameter.
+# For example, the number of images during training will be
+# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
+# IMAGES_PER_BATCH_TEST
+
+# -----------------------------------------------------------------------------
+# Config definition
+# -----------------------------------------------------------------------------
+
+_C = CN()
+
+# The version number, to upgrade from old configs to new ones if any
+# changes happen. It's recommended to keep a VERSION in your config file.
+_C.VERSION = 2
+
+_C.MODEL = CN()
+_C.MODEL.LOAD_PROPOSALS = False
+_C.MODEL.MASK_ON = False
+_C.MODEL.KEYPOINT_ON = False
+_C.MODEL.DEVICE = "cuda"
+_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
+
+# Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file
+# to be loaded to the model. You can find available models in the model zoo.
+_C.MODEL.WEIGHTS = ""
+
+# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR).
+# To train on images of different number of channels, just set different mean & std.
+# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
+_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
+# When using pre-trained models in Detectron1 or any MSRA models,
+# std has been absorbed into its conv1 weights, so the std needs to be set 1.
+# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
+_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
+
+
+# -----------------------------------------------------------------------------
+# INPUT
+# -----------------------------------------------------------------------------
+_C.INPUT = CN()
+# By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge.
+# Please refer to ResizeShortestEdge for detailed definition.
+# Size of the smallest side of the image during training
+_C.INPUT.MIN_SIZE_TRAIN = (800,)
+# Sample size of smallest side by choice or random selection from range give by
+# INPUT.MIN_SIZE_TRAIN
+_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
+# Maximum size of the side of the image during training
+_C.INPUT.MAX_SIZE_TRAIN = 1333
+# Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
+_C.INPUT.MIN_SIZE_TEST = 800
+# Maximum size of the side of the image during testing
+_C.INPUT.MAX_SIZE_TEST = 1333
+# Mode for flipping images used in data augmentation during training
+# choose one of ["horizontal, "vertical", "none"]
+_C.INPUT.RANDOM_FLIP = "horizontal"
+
+# `True` if cropping is used for data augmentation during training
+_C.INPUT.CROP = CN({"ENABLED": False})
+# Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation.
+_C.INPUT.CROP.TYPE = "relative_range"
+# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
+# pixels if CROP.TYPE is "absolute"
+_C.INPUT.CROP.SIZE = [0.9, 0.9]
+
+
+# Whether the model needs RGB, YUV, HSV etc.
+# Should be one of the modes defined here, as we use PIL to read the image:
+# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
+# with BGR being the one exception. One can set image format to BGR, we will
+# internally use RGB for conversion and flip the channels over
+_C.INPUT.FORMAT = "BGR"
+# The ground truth mask format that the model will use.
+# Mask R-CNN supports either "polygon" or "bitmask" as ground truth.
+_C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask"
+
+
+# -----------------------------------------------------------------------------
+# Dataset
+# -----------------------------------------------------------------------------
+_C.DATASETS = CN()
+# List of the dataset names for training. Must be registered in DatasetCatalog
+# Samples from these datasets will be merged and used as one dataset.
+_C.DATASETS.TRAIN = ()
+# List of the pre-computed proposal files for training, which must be consistent
+# with datasets listed in DATASETS.TRAIN.
+_C.DATASETS.PROPOSAL_FILES_TRAIN = ()
+# Number of top scoring precomputed proposals to keep for training
+_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
+# List of the dataset names for testing. Must be registered in DatasetCatalog
+_C.DATASETS.TEST = ()
+# List of the pre-computed proposal files for test, which must be consistent
+# with datasets listed in DATASETS.TEST.
+_C.DATASETS.PROPOSAL_FILES_TEST = ()
+# Number of top scoring precomputed proposals to keep for test
+_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
+
+# -----------------------------------------------------------------------------
+# DataLoader
+# -----------------------------------------------------------------------------
+_C.DATALOADER = CN()
+# Number of data loading threads
+_C.DATALOADER.NUM_WORKERS = 4
+# If True, each batch should contain only images for which the aspect ratio
+# is compatible. This groups portrait images together, and landscape images
+# are not batched with portrait images.
+_C.DATALOADER.ASPECT_RATIO_GROUPING = True
+# Options: TrainingSampler, RepeatFactorTrainingSampler
+_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
+# Repeat threshold for RepeatFactorTrainingSampler
+_C.DATALOADER.REPEAT_THRESHOLD = 0.0
+# Tf True, when working on datasets that have instance annotations, the
+# training dataloader will filter out images without associated annotations
+_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
+
+# ---------------------------------------------------------------------------- #
+# Backbone options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.BACKBONE = CN()
+
+_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
+# Freeze the first several stages so they are not trained.
+# There are 5 stages in ResNet. The first is a convolution, and the following
+# stages are each group of residual blocks.
+_C.MODEL.BACKBONE.FREEZE_AT = 2
+
+
+# ---------------------------------------------------------------------------- #
+# FPN options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.FPN = CN()
+# Names of the input feature maps to be used by FPN
+# They must have contiguous power of 2 strides
+# e.g., ["res2", "res3", "res4", "res5"]
+_C.MODEL.FPN.IN_FEATURES = []
+_C.MODEL.FPN.OUT_CHANNELS = 256
+
+# Options: "" (no norm), "GN"
+_C.MODEL.FPN.NORM = ""
+
+# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
+_C.MODEL.FPN.FUSE_TYPE = "sum"
+
+
+# ---------------------------------------------------------------------------- #
+# Proposal generator options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.PROPOSAL_GENERATOR = CN()
+# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
+_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
+# Proposal height and width both need to be greater than MIN_SIZE
+# (a the scale used during training or inference)
+_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
+
+
+# ---------------------------------------------------------------------------- #
+# Anchor generator options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ANCHOR_GENERATOR = CN()
+# The generator can be any name in the ANCHOR_GENERATOR registry
+_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
+# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
+# Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for
+# IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1.
+# When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES.
+_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
+# Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect
+# ratios are generated by an anchor generator.
+# Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W)
+# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true,
+# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used
+# for all IN_FEATURES.
+_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
+# Anchor angles.
+# list[list[float]], the angle in degrees, for each input feature map.
+# ANGLES[i] specifies the list of angles for IN_FEATURES[i].
+_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]
+# Relative offset between the center of the first anchor and the top-left corner of the image
+# Value has to be in [0, 1). Recommend to use 0.5, which means half stride.
+# The value is not expected to affect model accuracy.
+_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
+
+# ---------------------------------------------------------------------------- #
+# RPN options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RPN = CN()
+_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY
+
+# Names of the input feature maps to be used by RPN
+# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
+_C.MODEL.RPN.IN_FEATURES = ["res4"]
+# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
+# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
+_C.MODEL.RPN.BOUNDARY_THRESH = -1
+# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
+# Minimum overlap required between an anchor and ground-truth box for the
+# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
+# ==> positive RPN example: 1)
+# Maximum overlap allowed between an anchor and ground-truth box for the
+# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
+# ==> negative RPN example: 0)
+# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
+# are ignored (-1)
+_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]
+_C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
+# Number of regions per image used to train RPN
+_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
+# Target fraction of foreground (positive) examples per RPN minibatch
+_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
+# Options are: "smooth_l1", "giou", "diou", "ciou"
+_C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1"
+_C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0
+# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
+_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
+_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0
+_C.MODEL.RPN.LOSS_WEIGHT = 1.0
+# Number of top scoring RPN proposals to keep before applying NMS
+# When FPN is used, this is *per FPN level* (not total)
+_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000
+_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000
+# Number of top scoring RPN proposals to keep after applying NMS
+# When FPN is used, this limit is applied per level and then again to the union
+# of proposals from all levels
+# NOTE: When FPN is used, the meaning of this config is different from Detectron1.
+# It means per-batch topk in Detectron1, but per-image topk here.
+# See the "find_top_rpn_proposals" function for details.
+_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
+_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
+# NMS threshold used on RPN proposals
+_C.MODEL.RPN.NMS_THRESH = 0.7
+# Set this to -1 to use the same number of output channels as input channels.
+_C.MODEL.RPN.CONV_DIMS = [-1]
+
+# ---------------------------------------------------------------------------- #
+# ROI HEADS options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_HEADS = CN()
+_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads"
+# Number of foreground classes
+_C.MODEL.ROI_HEADS.NUM_CLASSES = 80
+# Names of the input feature maps to be used by ROI heads
+# Currently all heads (box, mask, ...) use the same input feature map list
+# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
+_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"]
+# IOU overlap ratios [IOU_THRESHOLD]
+# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
+# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
+_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5]
+_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1]
+# RoI minibatch size *per image* (number of regions of interest [ROIs]) during training
+# Total number of RoIs per training minibatch =
+# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
+# E.g., a common configuration is: 512 * 16 = 8192
+_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
+# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
+_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
+
+# Only used on test mode
+
+# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
+# balance obtaining high recall with not having too many low precision
+# detections that will slow down inference post processing steps (like NMS)
+# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
+# inference.
+_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05
+# Overlap threshold used for non-maximum suppression (suppress boxes with
+# IoU >= this threshold)
+_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
+# If True, augment proposals with ground-truth boxes before sampling proposals to
+# train ROI heads.
+_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True
+
+# ---------------------------------------------------------------------------- #
+# Box Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_BOX_HEAD = CN()
+# C4 don't use head name option
+# Options for non-C4 models: FastRCNNConvFCHead,
+_C.MODEL.ROI_BOX_HEAD.NAME = ""
+# Options are: "smooth_l1", "giou", "diou", "ciou"
+_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1"
+# The final scaling coefficient on the box regression loss, used to balance the magnitude of its
+# gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`.
+_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0
+# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
+# These are empirically chosen to approximately lead to unit variance targets
+_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
+# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
+_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0
+_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
+_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0
+# Hidden layer dimension for FC layers in the RoI box head
+_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024
+_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0
+# Channel dimension for Conv layers in the RoI box head
+_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256
+# Normalization method for the convolution layers.
+# Options: "" (no norm), "GN", "SyncBN".
+_C.MODEL.ROI_BOX_HEAD.NORM = ""
+# Whether to use class agnostic for bbox regression
+_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False
+# If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes.
+_C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False
+
+# ---------------------------------------------------------------------------- #
+# Cascaded Box Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_BOX_CASCADE_HEAD = CN()
+# The number of cascade stages is implicitly defined by the length of the following two configs.
+_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
+ (10.0, 10.0, 5.0, 5.0),
+ (20.0, 20.0, 10.0, 10.0),
+ (30.0, 30.0, 15.0, 15.0),
+)
+_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
+
+
+# ---------------------------------------------------------------------------- #
+# Mask Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_MASK_HEAD = CN()
+_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
+_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
+_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
+_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head
+_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
+# Normalization method for the convolution layers.
+# Options: "" (no norm), "GN", "SyncBN".
+_C.MODEL.ROI_MASK_HEAD.NORM = ""
+# Whether to use class agnostic for mask prediction
+_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+
+# ---------------------------------------------------------------------------- #
+# Keypoint Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_KEYPOINT_HEAD = CN()
+_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
+_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
+_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO.
+
+# Images with too few (or no) keypoints are excluded from training.
+_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
+# Normalize by the total number of visible keypoints in the minibatch if True.
+# Otherwise, normalize by the total number of keypoints that could ever exist
+# in the minibatch.
+# The keypoint softmax loss is only calculated on visible keypoints.
+# Since the number of visible keypoints can vary significantly between
+# minibatches, this has the effect of up-weighting the importance of
+# minibatches with few visible keypoints. (Imagine the extreme case of
+# only one visible keypoint versus N: in the case of N, each one
+# contributes 1/N to the gradient compared to the single keypoint
+# determining the gradient direction). Instead, we can normalize the
+# loss by the total number of keypoints, if it were the case that all
+# keypoints were visible in a full minibatch. (Returning to the example,
+# this means that the one visible keypoint contributes as much as each
+# of the N keypoints.)
+_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
+# Multi-task loss weight to use for keypoints
+# Recommended values:
+# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
+# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
+_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+# ---------------------------------------------------------------------------- #
+# Semantic Segmentation Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.SEM_SEG_HEAD = CN()
+_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
+_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
+# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
+# the correposnding pixel.
+_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
+# Number of classes in the semantic segmentation head
+_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
+# Number of channels in the 3x3 convs inside semantic-FPN heads.
+_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
+# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
+_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
+# Normalization method for the convolution layers. Options: "" (no norm), "GN".
+_C.MODEL.SEM_SEG_HEAD.NORM = "GN"
+_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
+
+_C.MODEL.PANOPTIC_FPN = CN()
+# Scaling of all losses from instance detection / segmentation head.
+_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
+
+# options when combining instance & semantic segmentation outputs
+_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) # "COMBINE.ENABLED" is deprecated & not used
+_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
+_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
+_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
+
+
+# ---------------------------------------------------------------------------- #
+# RetinaNet Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RETINANET = CN()
+
+# This is the number of foreground classes.
+_C.MODEL.RETINANET.NUM_CLASSES = 80
+
+_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
+
+# Convolutions to use in the cls and bbox tower
+# NOTE: this doesn't include the last conv for logits
+_C.MODEL.RETINANET.NUM_CONVS = 4
+
+# IoU overlap ratio [bg, fg] for labeling anchors.
+# Anchors with < bg are labeled negative (0)
+# Anchors with >= bg and < fg are ignored (-1)
+# Anchors with >= fg are labeled positive (1)
+_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
+_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
+
+# Prior prob for rare case (i.e. foreground) at the beginning of training.
+# This is used to set the bias for the logits layer of the classifier subnet.
+# This improves training stability in the case of heavy class imbalance.
+_C.MODEL.RETINANET.PRIOR_PROB = 0.01
+
+# Inference cls score threshold, only anchors with score > INFERENCE_TH are
+# considered for inference (to improve speed)
+_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
+# Select topk candidates before NMS
+_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
+_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
+
+# Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
+_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+
+# Loss parameters
+_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
+_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
+_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
+# Options are: "smooth_l1", "giou", "diou", "ciou"
+_C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
+
+# One of BN, SyncBN, FrozenBN, GN
+# Only supports GN until unshared norm is implemented
+_C.MODEL.RETINANET.NORM = ""
+
+
+# ---------------------------------------------------------------------------- #
+# ResNe[X]t options (ResNets = {ResNet, ResNeXt}
+# Note that parts of a resnet may be used for both the backbone and the head
+# These options apply to both
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RESNETS = CN()
+
+_C.MODEL.RESNETS.DEPTH = 50
+_C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone
+
+# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
+_C.MODEL.RESNETS.NUM_GROUPS = 1
+
+# Options: FrozenBN, GN, "SyncBN", "BN"
+_C.MODEL.RESNETS.NORM = "FrozenBN"
+
+# Baseline width of each group.
+# Scaling this parameters will scale the width of all bottleneck layers.
+_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
+
+# Place the stride 2 conv on the 1x1 filter
+# Use True only for the original MSRA ResNet; use False for C2 and Torch models
+_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
+
+# Apply dilation in stage "res5"
+_C.MODEL.RESNETS.RES5_DILATION = 1
+
+# Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet
+# For R18 and R34, this needs to be set to 64
+_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
+_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
+
+# Apply Deformable Convolution in stages
+# Specify if apply deform_conv on Res2, Res3, Res4, Res5
+_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
+# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
+# Use False for DeformableV1.
+_C.MODEL.RESNETS.DEFORM_MODULATED = False
+# Number of groups in deformable conv.
+_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
+
+
+# ---------------------------------------------------------------------------- #
+# Solver
+# ---------------------------------------------------------------------------- #
+_C.SOLVER = CN()
+
+# Options: WarmupMultiStepLR, WarmupCosineLR.
+# See detectron2/solver/build.py for definition.
+_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
+
+_C.SOLVER.MAX_ITER = 40000
+
+_C.SOLVER.BASE_LR = 0.001
+
+_C.SOLVER.MOMENTUM = 0.9
+
+_C.SOLVER.NESTEROV = False
+
+_C.SOLVER.WEIGHT_DECAY = 0.0001
+# The weight decay that's applied to parameters of normalization layers
+# (typically the affine transformation)
+_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
+
+_C.SOLVER.GAMMA = 0.1
+# The iteration number to decrease learning rate by GAMMA.
+_C.SOLVER.STEPS = (30000,)
+
+_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
+_C.SOLVER.WARMUP_ITERS = 1000
+_C.SOLVER.WARMUP_METHOD = "linear"
+
+# Save a checkpoint after every this number of iterations
+_C.SOLVER.CHECKPOINT_PERIOD = 5000
+
+# Number of images per batch across all machines. This is also the number
+# of training images per step (i.e. per iteration). If we use 16 GPUs
+# and IMS_PER_BATCH = 32, each GPU will see 2 images per batch.
+# May be adjusted automatically if REFERENCE_WORLD_SIZE is set.
+_C.SOLVER.IMS_PER_BATCH = 16
+
+# The reference number of workers (GPUs) this config is meant to train with.
+# It takes no effect when set to 0.
+# With a non-zero value, it will be used by DefaultTrainer to compute a desired
+# per-worker batch size, and then scale the other related configs (total batch size,
+# learning rate, etc) to match the per-worker batch size.
+# See documentation of `DefaultTrainer.auto_scale_workers` for details:
+_C.SOLVER.REFERENCE_WORLD_SIZE = 0
+
+# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for
+# biases. This is not useful (at least for recent models). You should avoid
+# changing these and they exist only to reproduce Detectron v1 training if
+# desired.
+_C.SOLVER.BIAS_LR_FACTOR = 1.0
+_C.SOLVER.WEIGHT_DECAY_BIAS = None # None means following WEIGHT_DECAY
+
+# Gradient clipping
+_C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False})
+# Type of gradient clipping, currently 2 values are supported:
+# - "value": the absolute values of elements of each gradients are clipped
+# - "norm": the norm of the gradient for each parameter is clipped thus
+# affecting all elements in the parameter
+_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value"
+# Maximum absolute value used for clipping gradients
+_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
+# Floating point number p for L-p norm to be used with the "norm"
+# gradient clipping type; for L-inf, please specify .inf
+_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0
+
+# Enable automatic mixed precision for training
+# Note that this does not change model's inference behavior.
+# To use AMP in inference, run inference under autocast()
+_C.SOLVER.AMP = CN({"ENABLED": False})
+
+# ---------------------------------------------------------------------------- #
+# Specific test options
+# ---------------------------------------------------------------------------- #
+_C.TEST = CN()
+# For end-to-end tests to verify the expected accuracy.
+# Each item is [task, metric, value, tolerance]
+# e.g.: [['bbox', 'AP', 38.5, 0.2]]
+_C.TEST.EXPECTED_RESULTS = []
+# The period (in terms of steps) to evaluate the model during training.
+# Set to 0 to disable.
+_C.TEST.EVAL_PERIOD = 0
+# The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval
+# When empty, it will use the defaults in COCO.
+# Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
+_C.TEST.KEYPOINT_OKS_SIGMAS = []
+# Maximum number of detections to return per image during inference (100 is
+# based on the limit established for the COCO dataset).
+_C.TEST.DETECTIONS_PER_IMAGE = 100
+
+_C.TEST.AUG = CN({"ENABLED": False})
+_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
+_C.TEST.AUG.MAX_SIZE = 4000
+_C.TEST.AUG.FLIP = True
+
+_C.TEST.PRECISE_BN = CN({"ENABLED": False})
+_C.TEST.PRECISE_BN.NUM_ITER = 200
+
+# ---------------------------------------------------------------------------- #
+# Misc options
+# ---------------------------------------------------------------------------- #
+# Directory where output files are written
+_C.OUTPUT_DIR = "./output"
+# Set seed to negative to fully randomize everything.
+# Set seed to positive to use a fixed seed. Note that a fixed seed increases
+# reproducibility but does not guarantee fully deterministic behavior.
+# Disabling all parallelism further increases reproducibility.
+_C.SEED = -1
+# Benchmark different cudnn algorithms.
+# If input images have very different sizes, this option will have large overhead
+# for about 10k iterations. It usually hurts total time, but can benefit for certain models.
+# If input images have the same or similar sizes, benchmark is often helpful.
+_C.CUDNN_BENCHMARK = False
+# The period (in terms of steps) for minibatch visualization at train time.
+# Set to 0 to disable.
+_C.VIS_PERIOD = 0
+
+# global config is for quick hack purposes.
+# You can set them in command line or config files,
+# and access it with:
+#
+# from detectron2.config import global_cfg
+# print(global_cfg.HACK)
+#
+# Do not commit any configs into it.
+_C.GLOBAL = CN()
+_C.GLOBAL.HACK = 1.0
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbb32e19ea518eee84941b20f58d1054e84d1937
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/instantiate.py
@@ -0,0 +1,82 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import dataclasses
+import logging
+from collections import abc
+from typing import Any
+
+from detectron2.utils.registry import _convert_target_to_string, locate
+
+__all__ = ["dump_dataclass", "instantiate"]
+
+
+def dump_dataclass(obj: Any):
+ """
+ Dump a dataclass recursively into a dict that can be later instantiated.
+
+ Args:
+ obj: a dataclass object
+
+ Returns:
+ dict
+ """
+ assert dataclasses.is_dataclass(obj) and not isinstance(
+ obj, type
+ ), "dump_dataclass() requires an instance of a dataclass."
+ ret = {"_target_": _convert_target_to_string(type(obj))}
+ for f in dataclasses.fields(obj):
+ v = getattr(obj, f.name)
+ if dataclasses.is_dataclass(v):
+ v = dump_dataclass(v)
+ if isinstance(v, (list, tuple)):
+ v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
+ ret[f.name] = v
+ return ret
+
+
+def instantiate(cfg):
+ """
+ Recursively instantiate objects defined in dictionaries by
+ "_target_" and arguments.
+
+ Args:
+ cfg: a dict-like object with "_target_" that defines the caller, and
+ other keys that define the arguments
+
+ Returns:
+ object instantiated by cfg
+ """
+ from omegaconf import ListConfig
+
+ if isinstance(cfg, ListConfig):
+ lst = [instantiate(x) for x in cfg]
+ return ListConfig(lst, flags={"allow_objects": True})
+ if isinstance(cfg, list):
+ # Specialize for list, because many classes take
+ # list[objects] as arguments, such as ResNet, DatasetMapper
+ return [instantiate(x) for x in cfg]
+
+ if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
+ # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
+ # but faster: https://github.com/facebookresearch/hydra/issues/1200
+ cfg = {k: instantiate(v) for k, v in cfg.items()}
+ cls = cfg.pop("_target_")
+ cls = instantiate(cls)
+
+ if isinstance(cls, str):
+ cls_name = cls
+ cls = locate(cls_name)
+ assert cls is not None, cls_name
+ else:
+ try:
+ cls_name = cls.__module__ + "." + cls.__qualname__
+ except Exception:
+ # target could be anything, so the above could fail
+ cls_name = str(cls)
+ assert callable(cls), f"_target_ {cls} does not define a callable object"
+ try:
+ return cls(**cfg)
+ except TypeError:
+ logger = logging.getLogger(__name__)
+ logger.error(f"Error when instantiating {cls_name}!")
+ raise
+ return cfg # return as-is if don't know what to do
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/config/lazy.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/lazy.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa5d86b427ab53ecbe992842cce71d12c5e3a141
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/config/lazy.py
@@ -0,0 +1,399 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import ast
+import builtins
+import importlib
+import inspect
+import logging
+import os
+import uuid
+from collections import abc
+from contextlib import contextmanager
+from copy import deepcopy
+from dataclasses import is_dataclass
+from typing import List, Tuple, Union
+import cloudpickle
+import yaml
+from omegaconf import DictConfig, ListConfig, OmegaConf
+
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.registry import _convert_target_to_string
+
+__all__ = ["LazyCall", "LazyConfig"]
+
+
+class LazyCall:
+ """
+ Wrap a callable so that when it's called, the call will not be executed,
+ but returns a dict that describes the call.
+
+ LazyCall object has to be called with only keyword arguments. Positional
+ arguments are not yet supported.
+
+ Examples:
+ ::
+ from detectron2.config import instantiate, LazyCall
+
+ layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32)
+ layer_cfg.out_channels = 64 # can edit it afterwards
+ layer = instantiate(layer_cfg)
+ """
+
+ def __init__(self, target):
+ if not (callable(target) or isinstance(target, (str, abc.Mapping))):
+ raise TypeError(
+ f"target of LazyCall must be a callable or defines a callable! Got {target}"
+ )
+ self._target = target
+
+ def __call__(self, **kwargs):
+ if is_dataclass(self._target):
+ # omegaconf object cannot hold dataclass type
+ # https://github.com/omry/omegaconf/issues/784
+ target = _convert_target_to_string(self._target)
+ else:
+ target = self._target
+ kwargs["_target_"] = target
+
+ return DictConfig(content=kwargs, flags={"allow_objects": True})
+
+
+def _visit_dict_config(cfg, func):
+ """
+ Apply func recursively to all DictConfig in cfg.
+ """
+ if isinstance(cfg, DictConfig):
+ func(cfg)
+ for v in cfg.values():
+ _visit_dict_config(v, func)
+ elif isinstance(cfg, ListConfig):
+ for v in cfg:
+ _visit_dict_config(v, func)
+
+
+def _validate_py_syntax(filename):
+ # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py
+ with PathManager.open(filename, "r") as f:
+ content = f.read()
+ try:
+ ast.parse(content)
+ except SyntaxError as e:
+ raise SyntaxError(f"Config file {filename} has syntax error!") from e
+
+
+def _cast_to_config(obj):
+ # if given a dict, return DictConfig instead
+ if isinstance(obj, dict):
+ return DictConfig(obj, flags={"allow_objects": True})
+ return obj
+
+
+_CFG_PACKAGE_NAME = "detectron2._cfg_loader"
+"""
+A namespace to put all imported config into.
+"""
+
+
+def _random_package_name(filename):
+ # generate a random package name when loading config files
+ return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename)
+
+
+@contextmanager
+def _patch_import():
+ """
+ Enhance relative import statements in config files, so that they:
+ 1. locate files purely based on relative location, regardless of packages.
+ e.g. you can import file without having __init__
+ 2. do not cache modules globally; modifications of module states has no side effect
+ 3. support other storage system through PathManager
+ 4. imported dict are turned into omegaconf.DictConfig automatically
+ """
+ old_import = builtins.__import__
+
+ def find_relative_file(original_file, relative_import_path, level):
+ cur_file = os.path.dirname(original_file)
+ for _ in range(level - 1):
+ cur_file = os.path.dirname(cur_file)
+ cur_name = relative_import_path.lstrip(".")
+ for part in cur_name.split("."):
+ cur_file = os.path.join(cur_file, part)
+ # NOTE: directory import is not handled. Because then it's unclear
+ # if such import should produce python module or DictConfig. This can
+ # be discussed further if needed.
+ if not cur_file.endswith(".py"):
+ cur_file += ".py"
+ if not PathManager.isfile(cur_file):
+ raise ImportError(
+ f"Cannot import name {relative_import_path} from "
+ f"{original_file}: {cur_file} has to exist."
+ )
+ return cur_file
+
+ def new_import(name, globals=None, locals=None, fromlist=(), level=0):
+ if (
+ # Only deal with relative imports inside config files
+ level != 0
+ and globals is not None
+ and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME)
+ ):
+ cur_file = find_relative_file(globals["__file__"], name, level)
+ _validate_py_syntax(cur_file)
+ spec = importlib.machinery.ModuleSpec(
+ _random_package_name(cur_file), None, origin=cur_file
+ )
+ module = importlib.util.module_from_spec(spec)
+ module.__file__ = cur_file
+ with PathManager.open(cur_file) as f:
+ content = f.read()
+ exec(compile(content, cur_file, "exec"), module.__dict__)
+ for name in fromlist: # turn imported dict into DictConfig automatically
+ val = _cast_to_config(module.__dict__[name])
+ module.__dict__[name] = val
+ return module
+ return old_import(name, globals, locals, fromlist=fromlist, level=level)
+
+ builtins.__import__ = new_import
+ yield new_import
+ builtins.__import__ = old_import
+
+
+class LazyConfig:
+ """
+ Provide methods to save, load, and overrides an omegaconf config object
+ which may contain definition of lazily-constructed objects.
+ """
+
+ @staticmethod
+ def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
+ """
+ Similar to :meth:`load()`, but load path relative to the caller's
+ source file.
+
+ This has the same functionality as a relative import, except that this method
+ accepts filename as a string, so more characters are allowed in the filename.
+ """
+ caller_frame = inspect.stack()[1]
+ caller_fname = caller_frame[0].f_code.co_filename
+ assert caller_fname != "", "load_rel Unable to find caller"
+ caller_dir = os.path.dirname(caller_fname)
+ filename = os.path.join(caller_dir, filename)
+ return LazyConfig.load(filename, keys)
+
+ @staticmethod
+ def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
+ """
+ Load a config file.
+
+ Args:
+ filename: absolute path or relative path w.r.t. the current working directory
+ keys: keys to load and return. If not given, return all keys
+ (whose values are config objects) in a dict.
+ """
+ has_keys = keys is not None
+ filename = filename.replace("/./", "/") # redundant
+ if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]:
+ raise ValueError(f"Config file {filename} has to be a python or yaml file.")
+ if filename.endswith(".py"):
+ _validate_py_syntax(filename)
+
+ with _patch_import():
+ # Record the filename
+ module_namespace = {
+ "__file__": filename,
+ "__package__": _random_package_name(filename),
+ }
+ with PathManager.open(filename) as f:
+ content = f.read()
+ # Compile first with filename to:
+ # 1. make filename appears in stacktrace
+ # 2. make load_rel able to find its parent's (possibly remote) location
+ exec(compile(content, filename, "exec"), module_namespace)
+
+ ret = module_namespace
+ else:
+ with PathManager.open(filename) as f:
+ obj = yaml.unsafe_load(f)
+ ret = OmegaConf.create(obj, flags={"allow_objects": True})
+
+ if has_keys:
+ if isinstance(keys, str):
+ return _cast_to_config(ret[keys])
+ else:
+ return tuple(_cast_to_config(ret[a]) for a in keys)
+ else:
+ if filename.endswith(".py"):
+ # when not specified, only load those that are config objects
+ ret = DictConfig(
+ {
+ name: _cast_to_config(value)
+ for name, value in ret.items()
+ if isinstance(value, (DictConfig, ListConfig, dict))
+ and not name.startswith("_")
+ },
+ flags={"allow_objects": True},
+ )
+ return ret
+
+ @staticmethod
+ def save(cfg, filename: str):
+ """
+ Save a config object to a yaml file.
+ Note that when the config dictionary contains complex objects (e.g. lambda),
+ it can't be saved to yaml. In that case we will print an error and
+ attempt to save to a pkl file instead.
+
+ Args:
+ cfg: an omegaconf config object
+ filename: yaml file name to save the config file
+ """
+ logger = logging.getLogger(__name__)
+ try:
+ cfg = deepcopy(cfg)
+ except Exception:
+ pass
+ else:
+ # if it's deep-copyable, then...
+ def _replace_type_by_name(x):
+ if "_target_" in x and callable(x._target_):
+ try:
+ x._target_ = _convert_target_to_string(x._target_)
+ except AttributeError:
+ pass
+
+ # not necessary, but makes yaml looks nicer
+ _visit_dict_config(cfg, _replace_type_by_name)
+
+ save_pkl = False
+ try:
+ dict = OmegaConf.to_container(cfg, resolve=False)
+ dumped = yaml.dump(dict, default_flow_style=None, allow_unicode=True, width=9999)
+ with PathManager.open(filename, "w") as f:
+ f.write(dumped)
+
+ try:
+ _ = yaml.unsafe_load(dumped) # test that it is loadable
+ except Exception:
+ logger.warning(
+ "The config contains objects that cannot serialize to a valid yaml. "
+ f"{filename} is human-readable but cannot be loaded."
+ )
+ save_pkl = True
+ except Exception:
+ logger.exception("Unable to serialize the config to yaml. Error:")
+ save_pkl = True
+
+ if save_pkl:
+ new_filename = filename + ".pkl"
+ try:
+ # retry by pickle
+ with PathManager.open(new_filename, "wb") as f:
+ cloudpickle.dump(cfg, f)
+ logger.warning(f"Config is saved using cloudpickle at {new_filename}.")
+ except Exception:
+ pass
+
+ @staticmethod
+ def apply_overrides(cfg, overrides: List[str]):
+ """
+ In-place override contents of cfg.
+
+ Args:
+ cfg: an omegaconf config object
+ overrides: list of strings in the format of "a=b" to override configs.
+ See https://hydra.cc/docs/next/advanced/override_grammar/basic/
+ for syntax.
+
+ Returns:
+ the cfg object
+ """
+
+ def safe_update(cfg, key, value):
+ parts = key.split(".")
+ for idx in range(1, len(parts)):
+ prefix = ".".join(parts[:idx])
+ v = OmegaConf.select(cfg, prefix, default=None)
+ if v is None:
+ break
+ if not OmegaConf.is_config(v):
+ raise KeyError(
+ f"Trying to update key {key}, but {prefix} "
+ f"is not a config, but has type {type(v)}."
+ )
+ OmegaConf.update(cfg, key, value, merge=True)
+
+ from hydra.core.override_parser.overrides_parser import OverridesParser
+
+ parser = OverridesParser.create()
+ overrides = parser.parse_overrides(overrides)
+ for o in overrides:
+ key = o.key_or_group
+ value = o.value()
+ if o.is_delete():
+ # TODO support this
+ raise NotImplementedError("deletion is not yet a supported override")
+ safe_update(cfg, key, value)
+ return cfg
+
+ @staticmethod
+ def to_py(cfg, prefix: str = "cfg."):
+ """
+ Try to convert a config object into Python-like psuedo code.
+
+ Note that perfect conversion is not always possible. So the returned
+ results are mainly meant to be human-readable, and not meant to be executed.
+
+ Args:
+ cfg: an omegaconf config object
+ prefix: root name for the resulting code (default: "cfg.")
+
+
+ Returns:
+ str of formatted Python code
+ """
+ import black
+
+ cfg = OmegaConf.to_container(cfg, resolve=True)
+
+ def _to_str(obj, prefix=None, inside_call=False):
+ if prefix is None:
+ prefix = []
+ if isinstance(obj, abc.Mapping) and "_target_" in obj:
+ # Dict representing a function call
+ target = _convert_target_to_string(obj.pop("_target_"))
+ args = []
+ for k, v in sorted(obj.items()):
+ args.append(f"{k}={_to_str(v, inside_call=True)}")
+ args = ", ".join(args)
+ call = f"{target}({args})"
+ return "".join(prefix) + call
+ elif isinstance(obj, abc.Mapping) and not inside_call:
+ # Dict that is not inside a call is a list of top-level config objects that we
+ # render as one object per line with dot separated prefixes
+ key_list = []
+ for k, v in sorted(obj.items()):
+ if isinstance(v, abc.Mapping) and "_target_" not in v:
+ key_list.append(_to_str(v, prefix=prefix + [k + "."]))
+ else:
+ key = "".join(prefix) + k
+ key_list.append(f"{key}={_to_str(v)}")
+ return "\n".join(key_list)
+ elif isinstance(obj, abc.Mapping):
+ # Dict that is inside a call is rendered as a regular dict
+ return (
+ "{"
+ + ",".join(
+ f"{repr(k)}: {_to_str(v, inside_call=inside_call)}"
+ for k, v in sorted(obj.items())
+ )
+ + "}"
+ )
+ elif isinstance(obj, list):
+ return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]"
+ else:
+ return repr(obj)
+
+ py_str = _to_str(cfg, prefix=[prefix])
+ try:
+ return black.format_str(py_str, mode=black.Mode())
+ except black.InvalidInput:
+ return py_str
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..259f669b78bd05815cb8d3351fd6c5fc9a1b85a1
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from . import transforms # isort:skip
+
+from .build import (
+ build_batch_data_loader,
+ build_detection_test_loader,
+ build_detection_train_loader,
+ get_detection_dataset_dicts,
+ load_proposals_into_dataset,
+ print_instances_class_histogram,
+)
+from .catalog import DatasetCatalog, MetadataCatalog, Metadata
+from .common import DatasetFromList, MapDataset, ToIterableDataset
+from .dataset_mapper import DatasetMapper
+
+# ensure the builtin datasets are registered
+from . import datasets, samplers # isort:skip
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/benchmark.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac2f372a4b111ad40b8e720adea208608271bab6
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/benchmark.py
@@ -0,0 +1,225 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import numpy as np
+from itertools import count
+from typing import List, Tuple
+import torch
+import tqdm
+from fvcore.common.timer import Timer
+
+from detectron2.utils import comm
+
+from .build import build_batch_data_loader
+from .common import DatasetFromList, MapDataset
+from .samplers import TrainingSampler
+
+logger = logging.getLogger(__name__)
+
+
+class _EmptyMapDataset(torch.utils.data.Dataset):
+ """
+ Map anything to emptiness.
+ """
+
+ def __init__(self, dataset):
+ self.ds = dataset
+
+ def __len__(self):
+ return len(self.ds)
+
+ def __getitem__(self, idx):
+ _ = self.ds[idx]
+ return [0]
+
+
+def iter_benchmark(
+ iterator, num_iter: int, warmup: int = 5, max_time_seconds: float = 60
+) -> Tuple[float, List[float]]:
+ """
+ Benchmark an iterator/iterable for `num_iter` iterations with an extra
+ `warmup` iterations of warmup.
+ End early if `max_time_seconds` time is spent on iterations.
+
+ Returns:
+ float: average time (seconds) per iteration
+ list[float]: time spent on each iteration. Sometimes useful for further analysis.
+ """
+ num_iter, warmup = int(num_iter), int(warmup)
+
+ iterator = iter(iterator)
+ for _ in range(warmup):
+ next(iterator)
+ timer = Timer()
+ all_times = []
+ for curr_iter in tqdm.trange(num_iter):
+ start = timer.seconds()
+ if start > max_time_seconds:
+ num_iter = curr_iter
+ break
+ next(iterator)
+ all_times.append(timer.seconds() - start)
+ avg = timer.seconds() / num_iter
+ return avg, all_times
+
+
+class DataLoaderBenchmark:
+ """
+ Some common benchmarks that help understand perf bottleneck of a standard dataloader
+ made of dataset, mapper and sampler.
+ """
+
+ def __init__(
+ self,
+ dataset,
+ *,
+ mapper,
+ sampler=None,
+ total_batch_size,
+ num_workers=0,
+ max_time_seconds: int = 90,
+ ):
+ """
+ Args:
+ max_time_seconds (int): maximum time to spent for each benchmark
+ other args: same as in `build.py:build_detection_train_loader`
+ """
+ if isinstance(dataset, list):
+ dataset = DatasetFromList(dataset, copy=False, serialize=True)
+ if sampler is None:
+ sampler = TrainingSampler(len(dataset))
+
+ self.dataset = dataset
+ self.mapper = mapper
+ self.sampler = sampler
+ self.total_batch_size = total_batch_size
+ self.num_workers = num_workers
+ self.per_gpu_batch_size = self.total_batch_size // comm.get_world_size()
+
+ self.max_time_seconds = max_time_seconds
+
+ def _benchmark(self, iterator, num_iter, warmup, msg=None):
+ avg, all_times = iter_benchmark(iterator, num_iter, warmup, self.max_time_seconds)
+ if msg is not None:
+ self._log_time(msg, avg, all_times)
+ return avg, all_times
+
+ def _log_time(self, msg, avg, all_times, distributed=False):
+ percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]]
+ if not distributed:
+ logger.info(
+ f"{msg}: avg={1.0/avg:.1f} it/s, "
+ f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
+ f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
+ )
+ return
+ avg_per_gpu = comm.all_gather(avg)
+ percentiles_per_gpu = comm.all_gather(percentiles)
+ if comm.get_rank() > 0:
+ return
+ for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu):
+ logger.info(
+ f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, "
+ f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
+ f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
+ )
+
+ def benchmark_dataset(self, num_iter, warmup=5):
+ """
+ Benchmark the speed of taking raw samples from the dataset.
+ """
+
+ def loader():
+ while True:
+ for k in self.sampler:
+ yield self.dataset[k]
+
+ self._benchmark(loader(), num_iter, warmup, "Dataset Alone")
+
+ def benchmark_mapper(self, num_iter, warmup=5):
+ """
+ Benchmark the speed of taking raw samples from the dataset and map
+ them in a single process.
+ """
+
+ def loader():
+ while True:
+ for k in self.sampler:
+ yield self.mapper(self.dataset[k])
+
+ self._benchmark(loader(), num_iter, warmup, "Single Process Mapper (sec/sample)")
+
+ def benchmark_workers(self, num_iter, warmup=10):
+ """
+ Benchmark the dataloader by tuning num_workers to [0, 1, self.num_workers].
+ """
+ candidates = [0, 1]
+ if self.num_workers not in candidates:
+ candidates.append(self.num_workers)
+
+ dataset = MapDataset(self.dataset, self.mapper)
+ for n in candidates:
+ loader = build_batch_data_loader(
+ dataset,
+ self.sampler,
+ self.total_batch_size,
+ num_workers=n,
+ )
+ self._benchmark(
+ iter(loader),
+ num_iter * max(n, 1),
+ warmup * max(n, 1),
+ f"DataLoader ({n} workers, bs={self.per_gpu_batch_size})",
+ )
+ del loader
+
+ def benchmark_IPC(self, num_iter, warmup=10):
+ """
+ Benchmark the dataloader where each worker outputs nothing. This
+ eliminates the IPC overhead compared to the regular dataloader.
+
+ PyTorch multiprocessing's IPC only optimizes for torch tensors.
+ Large numpy arrays or other data structure may incur large IPC overhead.
+ """
+ n = self.num_workers
+ dataset = _EmptyMapDataset(MapDataset(self.dataset, self.mapper))
+ loader = build_batch_data_loader(
+ dataset, self.sampler, self.total_batch_size, num_workers=n
+ )
+ self._benchmark(
+ iter(loader),
+ num_iter * max(n, 1),
+ warmup * max(n, 1),
+ f"DataLoader ({n} workers, bs={self.per_gpu_batch_size}) w/o comm",
+ )
+
+ def benchmark_distributed(self, num_iter, warmup=10):
+ """
+ Benchmark the dataloader in each distributed worker, and log results of
+ all workers. This helps understand the final performance as well as
+ the variances among workers.
+
+ It also prints startup time (first iter) of the dataloader.
+ """
+ gpu = comm.get_world_size()
+ dataset = MapDataset(self.dataset, self.mapper)
+ n = self.num_workers
+ loader = build_batch_data_loader(
+ dataset, self.sampler, self.total_batch_size, num_workers=n
+ )
+
+ timer = Timer()
+ loader = iter(loader)
+ next(loader)
+ startup_time = timer.seconds()
+ logger.info("Dataloader startup time: {:.2f} seconds".format(startup_time))
+
+ comm.synchronize()
+
+ avg, all_times = self._benchmark(loader, num_iter * max(n, 1), warmup * max(n, 1))
+ del loader
+ self._log_time(
+ f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})",
+ avg,
+ all_times,
+ True,
+ )
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/build.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/build.py
new file mode 100644
index 0000000000000000000000000000000000000000..a31369d1693f86154a7a9249fc043d49f3e9f390
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/build.py
@@ -0,0 +1,542 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import itertools
+import logging
+import numpy as np
+import operator
+import pickle
+from typing import Any, Callable, Dict, List, Optional, Union
+import torch
+import torch.utils.data as torchdata
+from tabulate import tabulate
+from termcolor import colored
+
+from detectron2.config import configurable
+from detectron2.structures import BoxMode
+from detectron2.utils.comm import get_world_size
+from detectron2.utils.env import seed_all_rng
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import _log_api_usage, log_first_n
+
+from .catalog import DatasetCatalog, MetadataCatalog
+from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset
+from .dataset_mapper import DatasetMapper
+from .detection_utils import check_metadata_consistency
+from .samplers import (
+ InferenceSampler,
+ RandomSubsetTrainingSampler,
+ RepeatFactorTrainingSampler,
+ TrainingSampler,
+)
+
+"""
+This file contains the default logic to build a dataloader for training or testing.
+"""
+
+__all__ = [
+ "build_batch_data_loader",
+ "build_detection_train_loader",
+ "build_detection_test_loader",
+ "get_detection_dataset_dicts",
+ "load_proposals_into_dataset",
+ "print_instances_class_histogram",
+]
+
+
+def filter_images_with_only_crowd_annotations(dataset_dicts):
+ """
+ Filter out images with none annotations or only crowd annotations
+ (i.e., images without non-crowd annotations).
+ A common training-time preprocessing on COCO dataset.
+
+ Args:
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
+
+ Returns:
+ list[dict]: the same format, but filtered.
+ """
+ num_before = len(dataset_dicts)
+
+ def valid(anns):
+ for ann in anns:
+ if ann.get("iscrowd", 0) == 0:
+ return True
+ return False
+
+ dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
+ num_after = len(dataset_dicts)
+ logger = logging.getLogger(__name__)
+ logger.info(
+ "Removed {} images with no usable annotations. {} images left.".format(
+ num_before - num_after, num_after
+ )
+ )
+ return dataset_dicts
+
+
+def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
+ """
+ Filter out images with too few number of keypoints.
+
+ Args:
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
+
+ Returns:
+ list[dict]: the same format as dataset_dicts, but filtered.
+ """
+ num_before = len(dataset_dicts)
+
+ def visible_keypoints_in_image(dic):
+ # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility
+ annotations = dic["annotations"]
+ return sum(
+ (np.array(ann["keypoints"][2::3]) > 0).sum()
+ for ann in annotations
+ if "keypoints" in ann
+ )
+
+ dataset_dicts = [
+ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image
+ ]
+ num_after = len(dataset_dicts)
+ logger = logging.getLogger(__name__)
+ logger.info(
+ "Removed {} images with fewer than {} keypoints.".format(
+ num_before - num_after, min_keypoints_per_image
+ )
+ )
+ return dataset_dicts
+
+
+def load_proposals_into_dataset(dataset_dicts, proposal_file):
+ """
+ Load precomputed object proposals into the dataset.
+
+ The proposal file should be a pickled dict with the following keys:
+
+ - "ids": list[int] or list[str], the image ids
+ - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
+ - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
+ corresponding to the boxes.
+ - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
+
+ Args:
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
+ proposal_file (str): file path of pre-computed proposals, in pkl format.
+
+ Returns:
+ list[dict]: the same format as dataset_dicts, but added proposal field.
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Loading proposals from: {}".format(proposal_file))
+
+ with PathManager.open(proposal_file, "rb") as f:
+ proposals = pickle.load(f, encoding="latin1")
+
+ # Rename the key names in D1 proposal files
+ rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
+ for key in rename_keys:
+ if key in proposals:
+ proposals[rename_keys[key]] = proposals.pop(key)
+
+ # Fetch the indexes of all proposals that are in the dataset
+ # Convert image_id to str since they could be int.
+ img_ids = set({str(record["image_id"]) for record in dataset_dicts})
+ id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
+
+ # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
+ bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
+
+ for record in dataset_dicts:
+ # Get the index of the proposal
+ i = id_to_index[str(record["image_id"])]
+
+ boxes = proposals["boxes"][i]
+ objectness_logits = proposals["objectness_logits"][i]
+ # Sort the proposals in descending order of the scores
+ inds = objectness_logits.argsort()[::-1]
+ record["proposal_boxes"] = boxes[inds]
+ record["proposal_objectness_logits"] = objectness_logits[inds]
+ record["proposal_bbox_mode"] = bbox_mode
+
+ return dataset_dicts
+
+
+def print_instances_class_histogram(dataset_dicts, class_names):
+ """
+ Args:
+ dataset_dicts (list[dict]): list of dataset dicts.
+ class_names (list[str]): list of class names (zero-indexed).
+ """
+ num_classes = len(class_names)
+ hist_bins = np.arange(num_classes + 1)
+ histogram = np.zeros((num_classes,), dtype=np.int)
+ for entry in dataset_dicts:
+ annos = entry["annotations"]
+ classes = np.asarray(
+ [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int
+ )
+ if len(classes):
+ assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
+ assert (
+ classes.max() < num_classes
+ ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
+ histogram += np.histogram(classes, bins=hist_bins)[0]
+
+ N_COLS = min(6, len(class_names) * 2)
+
+ def short_name(x):
+ # make long class names shorter. useful for lvis
+ if len(x) > 13:
+ return x[:11] + ".."
+ return x
+
+ data = list(
+ itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
+ )
+ total_num_instances = sum(data[1::2])
+ data.extend([None] * (N_COLS - (len(data) % N_COLS)))
+ if num_classes > 1:
+ data.extend(["total", total_num_instances])
+ data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
+ table = tabulate(
+ data,
+ headers=["category", "#instances"] * (N_COLS // 2),
+ tablefmt="pipe",
+ numalign="left",
+ stralign="center",
+ )
+ log_first_n(
+ logging.INFO,
+ "Distribution of instances among all {} categories:\n".format(num_classes)
+ + colored(table, "cyan"),
+ key="message",
+ )
+
+
+def get_detection_dataset_dicts(
+ names,
+ filter_empty=True,
+ min_keypoints=0,
+ proposal_files=None,
+ check_consistency=True,
+):
+ """
+ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
+
+ Args:
+ names (str or list[str]): a dataset name or a list of dataset names
+ filter_empty (bool): whether to filter out images without instance annotations
+ min_keypoints (int): filter out images with fewer keypoints than
+ `min_keypoints`. Set to 0 to do nothing.
+ proposal_files (list[str]): if given, a list of object proposal files
+ that match each dataset in `names`.
+ check_consistency (bool): whether to check if datasets have consistent metadata.
+
+ Returns:
+ list[dict]: a list of dicts following the standard dataset dict format.
+ """
+ if isinstance(names, str):
+ names = [names]
+ assert len(names), names
+ dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
+ for dataset_name, dicts in zip(names, dataset_dicts):
+ assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
+
+ if proposal_files is not None:
+ assert len(names) == len(proposal_files)
+ # load precomputed proposals from proposal files
+ dataset_dicts = [
+ load_proposals_into_dataset(dataset_i_dicts, proposal_file)
+ for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
+ ]
+
+ if isinstance(dataset_dicts[0], torchdata.Dataset):
+ return torchdata.ConcatDataset(dataset_dicts)
+
+ dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
+
+ has_instances = "annotations" in dataset_dicts[0]
+ if filter_empty and has_instances:
+ dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
+ if min_keypoints > 0 and has_instances:
+ dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
+
+ if check_consistency and has_instances:
+ try:
+ class_names = MetadataCatalog.get(names[0]).thing_classes
+ check_metadata_consistency("thing_classes", names)
+ print_instances_class_histogram(dataset_dicts, class_names)
+ except AttributeError: # class names are not available for this dataset
+ pass
+
+ assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
+ return dataset_dicts
+
+
+def build_batch_data_loader(
+ dataset,
+ sampler,
+ total_batch_size,
+ *,
+ aspect_ratio_grouping=False,
+ num_workers=0,
+ collate_fn=None,
+):
+ """
+ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are:
+ 1. support aspect ratio grouping options
+ 2. use no "batch collation", because this is common for detection training
+
+ Args:
+ dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset.
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices.
+ Must be provided iff. ``dataset`` is a map-style dataset.
+ total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see
+ :func:`build_detection_train_loader`.
+
+ Returns:
+ iterable[list]. Length of each list is the batch size of the current
+ GPU. Each element in the list comes from the dataset.
+ """
+ world_size = get_world_size()
+ assert (
+ total_batch_size > 0 and total_batch_size % world_size == 0
+ ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
+ total_batch_size, world_size
+ )
+ batch_size = total_batch_size // world_size
+
+ if isinstance(dataset, torchdata.IterableDataset):
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
+ else:
+ dataset = ToIterableDataset(dataset, sampler)
+
+ if aspect_ratio_grouping:
+ data_loader = torchdata.DataLoader(
+ dataset,
+ num_workers=num_workers,
+ collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
+ worker_init_fn=worker_init_reset_seed,
+ ) # yield individual mapped dict
+ data_loader = AspectRatioGroupedDataset(data_loader, batch_size)
+ if collate_fn is None:
+ return data_loader
+ return MapDataset(data_loader, collate_fn)
+ else:
+ return torchdata.DataLoader(
+ dataset,
+ batch_size=batch_size,
+ drop_last=True,
+ num_workers=num_workers,
+ collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
+ worker_init_fn=worker_init_reset_seed,
+ )
+
+
+def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
+ if dataset is None:
+ dataset = get_detection_dataset_dicts(
+ cfg.DATASETS.TRAIN,
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+ if cfg.MODEL.KEYPOINT_ON
+ else 0,
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
+ )
+ _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
+
+ if mapper is None:
+ mapper = DatasetMapper(cfg, True)
+
+ if sampler is None:
+ sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
+ logger = logging.getLogger(__name__)
+ logger.info("Using training sampler {}".format(sampler_name))
+ if sampler_name == "TrainingSampler":
+ sampler = TrainingSampler(len(dataset))
+ elif sampler_name == "RepeatFactorTrainingSampler":
+ repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
+ dataset, cfg.DATALOADER.REPEAT_THRESHOLD
+ )
+ sampler = RepeatFactorTrainingSampler(repeat_factors)
+ elif sampler_name == "RandomSubsetTrainingSampler":
+ sampler = RandomSubsetTrainingSampler(len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO)
+ else:
+ raise ValueError("Unknown training sampler: {}".format(sampler_name))
+
+ return {
+ "dataset": dataset,
+ "sampler": sampler,
+ "mapper": mapper,
+ "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
+ "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
+ }
+
+
+@configurable(from_config=_train_loader_from_config)
+def build_detection_train_loader(
+ dataset,
+ *,
+ mapper,
+ sampler=None,
+ total_batch_size,
+ aspect_ratio_grouping=True,
+ num_workers=0,
+ collate_fn=None,
+):
+ """
+ Build a dataloader for object detection with some default features.
+
+ Args:
+ dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
+ or a pytorch dataset (either map-style or iterable). It can be obtained
+ by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
+ mapper (callable): a callable which takes a sample (dict) from dataset and
+ returns the format to be consumed by the model.
+ When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
+ indices to be applied on ``dataset``.
+ If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
+ which coordinates an infinite random shuffle sequence across all workers.
+ Sampler must be None if ``dataset`` is iterable.
+ total_batch_size (int): total batch size across all workers.
+ aspect_ratio_grouping (bool): whether to group images with similar
+ aspect ratio for efficiency. When enabled, it requires each
+ element in dataset be a dict with keys "width" and "height".
+ num_workers (int): number of parallel data loading workers
+ collate_fn: a function that determines how to do batching, same as the argument of
+ `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of
+ data. No collation is OK for small batch size and simple data structures.
+ If your batch size is large and each sample contains too many small tensors,
+ it's more efficient to collate them in data loader.
+
+ Returns:
+ torch.utils.data.DataLoader:
+ a dataloader. Each output from it is a ``list[mapped_element]`` of length
+ ``total_batch_size / num_workers``, where ``mapped_element`` is produced
+ by the ``mapper``.
+ """
+ if isinstance(dataset, list):
+ dataset = DatasetFromList(dataset, copy=False)
+ if mapper is not None:
+ dataset = MapDataset(dataset, mapper)
+
+ if isinstance(dataset, torchdata.IterableDataset):
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
+ else:
+ if sampler is None:
+ sampler = TrainingSampler(len(dataset))
+ assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
+ return build_batch_data_loader(
+ dataset,
+ sampler,
+ total_batch_size,
+ aspect_ratio_grouping=aspect_ratio_grouping,
+ num_workers=num_workers,
+ collate_fn=collate_fn,
+ )
+
+
+def _test_loader_from_config(cfg, dataset_name, mapper=None):
+ """
+ Uses the given `dataset_name` argument (instead of the names in cfg), because the
+ standard practice is to evaluate each test set individually (not combining them).
+ """
+ if isinstance(dataset_name, str):
+ dataset_name = [dataset_name]
+
+ dataset = get_detection_dataset_dicts(
+ dataset_name,
+ filter_empty=False,
+ proposal_files=[
+ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
+ ]
+ if cfg.MODEL.LOAD_PROPOSALS
+ else None,
+ )
+ if mapper is None:
+ mapper = DatasetMapper(cfg, False)
+ return {
+ "dataset": dataset,
+ "mapper": mapper,
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
+ "sampler": InferenceSampler(len(dataset)),
+ }
+
+
+@configurable(from_config=_test_loader_from_config)
+def build_detection_test_loader(
+ dataset: Union[List[Any], torchdata.Dataset],
+ *,
+ mapper: Callable[[Dict[str, Any]], Any],
+ sampler: Optional[torchdata.Sampler] = None,
+ batch_size: int = 1,
+ num_workers: int = 0,
+ collate_fn: Optional[Callable[[List[Any]], Any]] = None,
+) -> torchdata.DataLoader:
+ """
+ Similar to `build_detection_train_loader`, with default batch size = 1,
+ and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
+ to produce the exact set of all samples.
+
+ Args:
+ dataset: a list of dataset dicts,
+ or a pytorch dataset (either map-style or iterable). They can be obtained
+ by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
+ mapper: a callable which takes a sample (dict) from dataset
+ and returns the format to be consumed by the model.
+ When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
+ sampler: a sampler that produces
+ indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
+ which splits the dataset across all workers. Sampler must be None
+ if `dataset` is iterable.
+ batch_size: the batch size of the data loader to be created.
+ Default to 1 image per worker since this is the standard when reporting
+ inference time in papers.
+ num_workers: number of parallel data loading workers
+ collate_fn: same as the argument of `torch.utils.data.DataLoader`.
+ Defaults to do no collation and return a list of data.
+
+ Returns:
+ DataLoader: a torch DataLoader, that loads the given detection
+ dataset, with test-time transformation and batching.
+
+ Examples:
+ ::
+ data_loader = build_detection_test_loader(
+ DatasetRegistry.get("my_test"),
+ mapper=DatasetMapper(...))
+
+ # or, instantiate with a CfgNode:
+ data_loader = build_detection_test_loader(cfg, "my_test")
+ """
+ if isinstance(dataset, list):
+ dataset = DatasetFromList(dataset, copy=False)
+ if mapper is not None:
+ dataset = MapDataset(dataset, mapper)
+ if isinstance(dataset, torchdata.IterableDataset):
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
+ else:
+ if sampler is None:
+ sampler = InferenceSampler(len(dataset))
+ return torchdata.DataLoader(
+ dataset,
+ batch_size=batch_size,
+ sampler=sampler,
+ drop_last=False,
+ num_workers=num_workers,
+ collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
+ )
+
+
+def trivial_batch_collator(batch):
+ """
+ A batch collator that does nothing.
+ """
+ return batch
+
+
+def worker_init_reset_seed(worker_id):
+ initial_seed = torch.initial_seed() % 2 ** 31
+ seed_all_rng(initial_seed + worker_id)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/catalog.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..45c110c19508f23921b9033cdaf0aa8056f0c125
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/catalog.py
@@ -0,0 +1,236 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import types
+from collections import UserDict
+from typing import List
+
+from detectron2.utils.logger import log_first_n
+
+__all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"]
+
+
+class _DatasetCatalog(UserDict):
+ """
+ A global dictionary that stores information about the datasets and how to obtain them.
+
+ It contains a mapping from strings
+ (which are names that identify a dataset, e.g. "coco_2014_train")
+ to a function which parses the dataset and returns the samples in the
+ format of `list[dict]`.
+
+ The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details)
+ if used with the data loader functionalities in `data/build.py,data/detection_transform.py`.
+
+ The purpose of having this catalog is to make it easy to choose
+ different datasets, by just using the strings in the config.
+ """
+
+ def register(self, name, func):
+ """
+ Args:
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+ func (callable): a callable which takes no arguments and returns a list of dicts.
+ It must return the same results if called multiple times.
+ """
+ assert callable(func), "You must register a function with `DatasetCatalog.register`!"
+ assert name not in self, "Dataset '{}' is already registered!".format(name)
+ self[name] = func
+
+ def get(self, name):
+ """
+ Call the registered function and return its results.
+
+ Args:
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+
+ Returns:
+ list[dict]: dataset annotations.
+ """
+ try:
+ f = self[name]
+ except KeyError as e:
+ raise KeyError(
+ "Dataset '{}' is not registered! Available datasets are: {}".format(
+ name, ", ".join(list(self.keys()))
+ )
+ ) from e
+ return f()
+
+ def list(self) -> List[str]:
+ """
+ List all registered datasets.
+
+ Returns:
+ list[str]
+ """
+ return list(self.keys())
+
+ def remove(self, name):
+ """
+ Alias of ``pop``.
+ """
+ self.pop(name)
+
+ def __str__(self):
+ return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys()))
+
+ __repr__ = __str__
+
+
+DatasetCatalog = _DatasetCatalog()
+DatasetCatalog.__doc__ = (
+ _DatasetCatalog.__doc__
+ + """
+ .. automethod:: detectron2.data.catalog.DatasetCatalog.register
+ .. automethod:: detectron2.data.catalog.DatasetCatalog.get
+"""
+)
+
+
+class Metadata(types.SimpleNamespace):
+ """
+ A class that supports simple attribute setter/getter.
+ It is intended for storing metadata of a dataset and make it accessible globally.
+
+ Examples:
+ ::
+ # somewhere when you load the data:
+ MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
+
+ # somewhere when you print statistics or visualize:
+ classes = MetadataCatalog.get("mydataset").thing_classes
+ """
+
+ # the name of the dataset
+ # set default to N/A so that `self.name` in the errors will not trigger getattr again
+ name: str = "N/A"
+
+ _RENAMED = {
+ "class_names": "thing_classes",
+ "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
+ "stuff_class_names": "stuff_classes",
+ }
+
+ def __getattr__(self, key):
+ if key in self._RENAMED:
+ log_first_n(
+ logging.WARNING,
+ "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+ n=10,
+ )
+ return getattr(self, self._RENAMED[key])
+
+ # "name" exists in every metadata
+ if len(self.__dict__) > 1:
+ raise AttributeError(
+ "Attribute '{}' does not exist in the metadata of dataset '{}'. Available "
+ "keys are {}.".format(key, self.name, str(self.__dict__.keys()))
+ )
+ else:
+ raise AttributeError(
+ f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': "
+ "metadata is empty."
+ )
+
+ def __setattr__(self, key, val):
+ if key in self._RENAMED:
+ log_first_n(
+ logging.WARNING,
+ "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+ n=10,
+ )
+ setattr(self, self._RENAMED[key], val)
+
+ # Ensure that metadata of the same name stays consistent
+ try:
+ oldval = getattr(self, key)
+ assert oldval == val, (
+ "Attribute '{}' in the metadata of '{}' cannot be set "
+ "to a different value!\n{} != {}".format(key, self.name, oldval, val)
+ )
+ except AttributeError:
+ super().__setattr__(key, val)
+
+ def as_dict(self):
+ """
+ Returns all the metadata as a dict.
+ Note that modifications to the returned dict will not reflect on the Metadata object.
+ """
+ return copy.copy(self.__dict__)
+
+ def set(self, **kwargs):
+ """
+ Set multiple metadata with kwargs.
+ """
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+ return self
+
+ def get(self, key, default=None):
+ """
+ Access an attribute and return its value if exists.
+ Otherwise return default.
+ """
+ try:
+ return getattr(self, key)
+ except AttributeError:
+ return default
+
+
+class _MetadataCatalog(UserDict):
+ """
+ MetadataCatalog is a global dictionary that provides access to
+ :class:`Metadata` of a given dataset.
+
+ The metadata associated with a certain name is a singleton: once created, the
+ metadata will stay alive and will be returned by future calls to ``get(name)``.
+
+ It's like global variables, so don't abuse it.
+ It's meant for storing knowledge that's constant and shared across the execution
+ of the program, e.g.: the class names in COCO.
+ """
+
+ def get(self, name):
+ """
+ Args:
+ name (str): name of a dataset (e.g. coco_2014_train).
+
+ Returns:
+ Metadata: The :class:`Metadata` instance associated with this name,
+ or create an empty one if none is available.
+ """
+ assert len(name)
+ r = super().get(name, None)
+ if r is None:
+ r = self[name] = Metadata(name=name)
+ return r
+
+ def list(self):
+ """
+ List all registered metadata.
+
+ Returns:
+ list[str]: keys (names of datasets) of all registered metadata
+ """
+ return list(self.keys())
+
+ def remove(self, name):
+ """
+ Alias of ``pop``.
+ """
+ self.pop(name)
+
+ def __str__(self):
+ return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys()))
+
+ __repr__ = __str__
+
+
+MetadataCatalog = _MetadataCatalog()
+MetadataCatalog.__doc__ = (
+ _MetadataCatalog.__doc__
+ + """
+ .. automethod:: detectron2.data.catalog.MetadataCatalog.get
+"""
+)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/common.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6b8742417abc897f5faa190db1341bbe7b2940d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/common.py
@@ -0,0 +1,241 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import itertools
+import logging
+import numpy as np
+import pickle
+import random
+import torch.utils.data as data
+from torch.utils.data.sampler import Sampler
+
+from detectron2.utils.serialize import PicklableWrapper
+
+__all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"]
+
+
+def _shard_iterator_dataloader_worker(iterable):
+ # Shard the iterable if we're currently inside pytorch dataloader worker.
+ worker_info = data.get_worker_info()
+ if worker_info is None or worker_info.num_workers == 1:
+ # do nothing
+ yield from iterable
+ else:
+ yield from itertools.islice(iterable, worker_info.id, None, worker_info.num_workers)
+
+
+class _MapIterableDataset(data.IterableDataset):
+ """
+ Map a function over elements in an IterableDataset.
+
+ Similar to pytorch's MapIterDataPipe, but support filtering when map_func
+ returns None.
+
+ This class is not public-facing. Will be called by `MapDataset`.
+ """
+
+ def __init__(self, dataset, map_func):
+ self._dataset = dataset
+ self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work
+
+ def __len__(self):
+ return len(self._dataset)
+
+ def __iter__(self):
+ for x in map(self._map_func, self._dataset):
+ if x is not None:
+ yield x
+
+
+class MapDataset(data.Dataset):
+ """
+ Map a function over the elements in a dataset.
+ """
+
+ def __init__(self, dataset, map_func):
+ """
+ Args:
+ dataset: a dataset where map function is applied. Can be either
+ map-style or iterable dataset. When given an iterable dataset,
+ the returned object will also be an iterable dataset.
+ map_func: a callable which maps the element in dataset. map_func can
+ return None to skip the data (e.g. in case of errors).
+ How None is handled depends on the style of `dataset`.
+ If `dataset` is map-style, it randomly tries other elements.
+ If `dataset` is iterable, it skips the data and tries the next.
+ """
+ self._dataset = dataset
+ self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work
+
+ self._rng = random.Random(42)
+ self._fallback_candidates = set(range(len(dataset)))
+
+ def __new__(cls, dataset, map_func):
+ is_iterable = isinstance(dataset, data.IterableDataset)
+ if is_iterable:
+ return _MapIterableDataset(dataset, map_func)
+ else:
+ return super().__new__(cls)
+
+ def __getnewargs__(self):
+ return self._dataset, self._map_func
+
+ def __len__(self):
+ return len(self._dataset)
+
+ def __getitem__(self, idx):
+ retry_count = 0
+ cur_idx = int(idx)
+
+ while True:
+ data = self._map_func(self._dataset[cur_idx])
+ if data is not None:
+ self._fallback_candidates.add(cur_idx)
+ return data
+
+ # _map_func fails for this idx, use a random new index from the pool
+ retry_count += 1
+ self._fallback_candidates.discard(cur_idx)
+ cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
+
+ if retry_count >= 3:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
+ idx, retry_count
+ )
+ )
+
+
+class DatasetFromList(data.Dataset):
+ """
+ Wrap a list to a torch Dataset. It produces elements of the list as data.
+ """
+
+ def __init__(self, lst: list, copy: bool = True, serialize: bool = True):
+ """
+ Args:
+ lst (list): a list which contains elements to produce.
+ copy (bool): whether to deepcopy the element when producing it,
+ so that the result can be modified in place without affecting the
+ source in the list.
+ serialize (bool): whether to hold memory using serialized objects, when
+ enabled, data loader workers can use shared RAM from master
+ process instead of making a copy.
+ """
+ self._lst = lst
+ self._copy = copy
+ self._serialize = serialize
+
+ def _serialize(data):
+ buffer = pickle.dumps(data, protocol=-1)
+ return np.frombuffer(buffer, dtype=np.uint8)
+
+ if self._serialize:
+ logger = logging.getLogger(__name__)
+ logger.info(
+ "Serializing {} elements to byte tensors and concatenating them all ...".format(
+ len(self._lst)
+ )
+ )
+ self._lst = [_serialize(x) for x in self._lst]
+ self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
+ self._addr = np.cumsum(self._addr)
+ self._lst = np.concatenate(self._lst)
+ logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024 ** 2))
+
+ def __len__(self):
+ if self._serialize:
+ return len(self._addr)
+ else:
+ return len(self._lst)
+
+ def __getitem__(self, idx):
+ if self._serialize:
+ start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
+ end_addr = self._addr[idx].item()
+ bytes = memoryview(self._lst[start_addr:end_addr])
+ return pickle.loads(bytes)
+ elif self._copy:
+ return copy.deepcopy(self._lst[idx])
+ else:
+ return self._lst[idx]
+
+
+class ToIterableDataset(data.IterableDataset):
+ """
+ Convert an old indices-based (also called map-style) dataset
+ to an iterable-style dataset.
+ """
+
+ def __init__(self, dataset: data.Dataset, sampler: Sampler, shard_sampler: bool = True):
+ """
+ Args:
+ dataset: an old-style dataset with ``__getitem__``
+ sampler: a cheap iterable that produces indices to be applied on ``dataset``.
+ shard_sampler: whether to shard the sampler based on the current pytorch data loader
+ worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple
+ workers, it is responsible for sharding its data based on worker id so that workers
+ don't produce identical data.
+
+ Most samplers (like our TrainingSampler) do not shard based on dataloader worker id
+ and this argument should be set to True. But certain samplers may be already
+ sharded, in that case this argument should be set to False.
+ """
+ assert not isinstance(dataset, data.IterableDataset), dataset
+ assert isinstance(sampler, Sampler), sampler
+ self.dataset = dataset
+ self.sampler = sampler
+ self.shard_sampler = shard_sampler
+
+ def __iter__(self):
+ if not self.shard_sampler:
+ sampler = self.sampler
+ else:
+ # With map-style dataset, `DataLoader(dataset, sampler)` runs the
+ # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`
+ # will run sampler in every of the N worker. So we should only keep 1/N of the ids on
+ # each worker. The assumption is that sampler is cheap to iterate so it's fine to
+ # discard ids in workers.
+ sampler = _shard_iterator_dataloader_worker(self.sampler)
+ for idx in sampler:
+ yield self.dataset[idx]
+
+ def __len__(self):
+ return len(self.sampler)
+
+
+class AspectRatioGroupedDataset(data.IterableDataset):
+ """
+ Batch data that have similar aspect ratio together.
+ In this implementation, images whose aspect ratio < (or >) 1 will
+ be batched together.
+ This improves training speed because the images then need less padding
+ to form a batch.
+
+ It assumes the underlying dataset produces dicts with "width" and "height" keys.
+ It will then produce a list of original dicts with length = batch_size,
+ all with similar aspect ratios.
+ """
+
+ def __init__(self, dataset, batch_size):
+ """
+ Args:
+ dataset: an iterable. Each element must be a dict with keys
+ "width" and "height", which will be used to batch data.
+ batch_size (int):
+ """
+ self.dataset = dataset
+ self.batch_size = batch_size
+ self._buckets = [[] for _ in range(2)]
+ # Hard-coded two aspect ratio groups: w > h and w < h.
+ # Can add support for more aspect ratio groups, but doesn't seem useful
+
+ def __iter__(self):
+ for d in self.dataset:
+ w, h = d["width"], d["height"]
+ bucket_id = 0 if w > h else 1
+ bucket = self._buckets[bucket_id]
+ bucket.append(d)
+ if len(bucket) == self.batch_size:
+ yield bucket[:]
+ del bucket[:]
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8714f7990f11e146a01e03d108518e0356b50c4
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/dataset_mapper.py
@@ -0,0 +1,191 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import numpy as np
+from typing import List, Optional, Union
+import torch
+
+from detectron2.config import configurable
+
+from . import detection_utils as utils
+from . import transforms as T
+
+"""
+This file contains the default mapping that's applied to "dataset dicts".
+"""
+
+__all__ = ["DatasetMapper"]
+
+
+class DatasetMapper:
+ """
+ A callable which takes a dataset dict in Detectron2 Dataset format,
+ and map it into a format used by the model.
+
+ This is the default callable to be used to map your dataset dict into training data.
+ You may need to follow it to implement your own one for customized logic,
+ such as a different way to read or transform images.
+ See :doc:`/tutorials/data_loading` for details.
+
+ The callable currently does the following:
+
+ 1. Read the image from "file_name"
+ 2. Applies cropping/geometric transforms to the image and annotations
+ 3. Prepare data and annotations to Tensor and :class:`Instances`
+ """
+
+ @configurable
+ def __init__(
+ self,
+ is_train: bool,
+ *,
+ augmentations: List[Union[T.Augmentation, T.Transform]],
+ image_format: str,
+ use_instance_mask: bool = False,
+ use_keypoint: bool = False,
+ instance_mask_format: str = "polygon",
+ keypoint_hflip_indices: Optional[np.ndarray] = None,
+ precomputed_proposal_topk: Optional[int] = None,
+ recompute_boxes: bool = False,
+ ):
+ """
+ NOTE: this interface is experimental.
+
+ Args:
+ is_train: whether it's used in training or inference
+ augmentations: a list of augmentations or deterministic transforms to apply
+ image_format: an image format supported by :func:`detection_utils.read_image`.
+ use_instance_mask: whether to process instance segmentation annotations, if available
+ use_keypoint: whether to process keypoint annotations if available
+ instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
+ masks into this format.
+ keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
+ precomputed_proposal_topk: if given, will load pre-computed
+ proposals from dataset_dict and keep the top k proposals for each image.
+ recompute_boxes: whether to overwrite bounding box annotations
+ by computing tight bounding boxes from instance mask annotations.
+ """
+ if recompute_boxes:
+ assert use_instance_mask, "recompute_boxes requires instance masks"
+ # fmt: off
+ self.is_train = is_train
+ self.augmentations = T.AugmentationList(augmentations)
+ self.image_format = image_format
+ self.use_instance_mask = use_instance_mask
+ self.instance_mask_format = instance_mask_format
+ self.use_keypoint = use_keypoint
+ self.keypoint_hflip_indices = keypoint_hflip_indices
+ self.proposal_topk = precomputed_proposal_topk
+ self.recompute_boxes = recompute_boxes
+ # fmt: on
+ logger = logging.getLogger(__name__)
+ mode = "training" if is_train else "inference"
+ logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
+
+ @classmethod
+ def from_config(cls, cfg, is_train: bool = True):
+ augs = utils.build_augmentation(cfg, is_train)
+ if cfg.INPUT.CROP.ENABLED and is_train:
+ augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
+ recompute_boxes = cfg.MODEL.MASK_ON
+ else:
+ recompute_boxes = False
+
+ ret = {
+ "is_train": is_train,
+ "augmentations": augs,
+ "image_format": cfg.INPUT.FORMAT,
+ "use_instance_mask": cfg.MODEL.MASK_ON,
+ "instance_mask_format": cfg.INPUT.MASK_FORMAT,
+ "use_keypoint": cfg.MODEL.KEYPOINT_ON,
+ "recompute_boxes": recompute_boxes,
+ }
+
+ if cfg.MODEL.KEYPOINT_ON:
+ ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
+
+ if cfg.MODEL.LOAD_PROPOSALS:
+ ret["precomputed_proposal_topk"] = (
+ cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
+ if is_train
+ else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
+ )
+ return ret
+
+ def _transform_annotations(self, dataset_dict, transforms, image_shape):
+ # USER: Modify this if you want to keep them for some reason.
+ for anno in dataset_dict["annotations"]:
+ if not self.use_instance_mask:
+ anno.pop("segmentation", None)
+ if not self.use_keypoint:
+ anno.pop("keypoints", None)
+
+ # USER: Implement additional transformations if you have other types of data
+ annos = [
+ utils.transform_instance_annotations(
+ obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
+ )
+ for obj in dataset_dict.pop("annotations")
+ if obj.get("iscrowd", 0) == 0
+ ]
+ instances = utils.annotations_to_instances(
+ annos, image_shape, mask_format=self.instance_mask_format
+ )
+
+ # After transforms such as cropping are applied, the bounding box may no longer
+ # tightly bound the object. As an example, imagine a triangle object
+ # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
+ # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
+ # the intersection of original bounding box and the cropping box.
+ if self.recompute_boxes:
+ instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
+ dataset_dict["instances"] = utils.filter_empty_instances(instances)
+
+ def __call__(self, dataset_dict):
+ """
+ Args:
+ dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
+
+ Returns:
+ dict: a format that builtin models in detectron2 accept
+ """
+ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
+ # USER: Write your own image loading if it's not from a file
+ image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
+ utils.check_image_size(dataset_dict, image)
+
+ # USER: Remove if you don't do semantic/panoptic segmentation.
+ if "sem_seg_file_name" in dataset_dict:
+ sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
+ else:
+ sem_seg_gt = None
+
+ aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
+ transforms = self.augmentations(aug_input)
+ image, sem_seg_gt = aug_input.image, aug_input.sem_seg
+
+ image_shape = image.shape[:2] # h, w
+ # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
+ # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
+ # Therefore it's important to use torch.Tensor.
+ dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
+ if sem_seg_gt is not None:
+ dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
+
+ # USER: Remove if you don't use pre-computed proposals.
+ # Most users would not need this feature.
+ if self.proposal_topk is not None:
+ utils.transform_proposals(
+ dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
+ )
+
+ if not self.is_train:
+ # USER: Modify this if you want to keep them for some reason.
+ dataset_dict.pop("annotations", None)
+ dataset_dict.pop("sem_seg_file_name", None)
+ return dataset_dict
+
+ if "annotations" in dataset_dict:
+ self._transform_annotations(dataset_dict, transforms, image_shape)
+
+ return dataset_dict
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/README.md b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb3e4f7afec17137c95c78be6ef06d520ec8032
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/README.md
@@ -0,0 +1,9 @@
+
+
+### Common Datasets
+
+The dataset implemented here do not need to load the data into the final format.
+It should provide the minimal data structure needed to use the dataset, so it can be very efficient.
+
+For example, for an image dataset, just provide the file names and labels, but don't read the images.
+Let the downstream decide how to read.
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a44bedc15e5f0e762fc4d77efd6f1b07c6ff77d0
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json
+from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
+from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta
+from .pascal_voc import load_voc_instances, register_pascal_voc
+from . import builtin as _builtin # ensure the builtin datasets are registered
+
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3a68aa833f12f0fa324a269c36190f21b8a75bd
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+
+"""
+This file registers pre-defined datasets at hard-coded paths, and their metadata.
+
+We hard-code metadata for common datasets. This will enable:
+1. Consistency check when loading the datasets
+2. Use models on these standard datasets directly and run demos,
+ without having to download the dataset annotations
+
+We hard-code some paths to the dataset that's assumed to
+exist in "./datasets/".
+
+Users SHOULD NOT use this file to create new dataset / metadata for new dataset.
+To add new dataset, refer to the tutorial "docs/DATASETS.md".
+"""
+
+import os
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+
+from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
+from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
+from .cityscapes_panoptic import register_all_cityscapes_panoptic
+from .coco import load_sem_seg, register_coco_instances
+from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
+from .lvis import get_lvis_instances_meta, register_lvis_instances
+from .pascal_voc import register_pascal_voc
+
+# ==== Predefined datasets and splits for COCO ==========
+
+_PREDEFINED_SPLITS_COCO = {}
+_PREDEFINED_SPLITS_COCO["coco"] = {
+ "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
+ "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
+ "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
+ "coco_2014_valminusminival": (
+ "coco/val2014",
+ "coco/annotations/instances_valminusminival2014.json",
+ ),
+ "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"),
+ "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"),
+ "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
+ "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
+ "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
+}
+
+_PREDEFINED_SPLITS_COCO["coco_person"] = {
+ "keypoints_coco_2014_train": (
+ "coco/train2014",
+ "coco/annotations/person_keypoints_train2014.json",
+ ),
+ "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
+ "keypoints_coco_2014_minival": (
+ "coco/val2014",
+ "coco/annotations/person_keypoints_minival2014.json",
+ ),
+ "keypoints_coco_2014_valminusminival": (
+ "coco/val2014",
+ "coco/annotations/person_keypoints_valminusminival2014.json",
+ ),
+ "keypoints_coco_2017_train": (
+ "coco/train2017",
+ "coco/annotations/person_keypoints_train2017.json",
+ ),
+ "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
+ "keypoints_coco_2017_val_100": (
+ "coco/val2017",
+ "coco/annotations/person_keypoints_val2017_100.json",
+ ),
+}
+
+
+_PREDEFINED_SPLITS_COCO_PANOPTIC = {
+ "coco_2017_train_panoptic": (
+ # This is the original panoptic annotation directory
+ "coco/panoptic_train2017",
+ "coco/annotations/panoptic_train2017.json",
+ # This directory contains semantic annotations that are
+ # converted from panoptic annotations.
+ # It is used by PanopticFPN.
+ # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
+ # to create these directories.
+ "coco/panoptic_stuff_train2017",
+ ),
+ "coco_2017_val_panoptic": (
+ "coco/panoptic_val2017",
+ "coco/annotations/panoptic_val2017.json",
+ "coco/panoptic_stuff_val2017",
+ ),
+ "coco_2017_val_100_panoptic": (
+ "coco/panoptic_val2017_100",
+ "coco/annotations/panoptic_val2017_100.json",
+ "coco/panoptic_stuff_val2017_100",
+ ),
+}
+
+
+def register_all_coco(root):
+ for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
+ for key, (image_root, json_file) in splits_per_dataset.items():
+ # Assume pre-defined datasets live in `./datasets`.
+ register_coco_instances(
+ key,
+ _get_builtin_metadata(dataset_name),
+ os.path.join(root, json_file) if "://" not in json_file else json_file,
+ os.path.join(root, image_root),
+ )
+
+ for (
+ prefix,
+ (panoptic_root, panoptic_json, semantic_root),
+ ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
+ prefix_instances = prefix[: -len("_panoptic")]
+ instances_meta = MetadataCatalog.get(prefix_instances)
+ image_root, instances_json = instances_meta.image_root, instances_meta.json_file
+ # The "separated" version of COCO panoptic segmentation dataset,
+ # e.g. used by Panoptic FPN
+ register_coco_panoptic_separated(
+ prefix,
+ _get_builtin_metadata("coco_panoptic_separated"),
+ image_root,
+ os.path.join(root, panoptic_root),
+ os.path.join(root, panoptic_json),
+ os.path.join(root, semantic_root),
+ instances_json,
+ )
+ # The "standard" version of COCO panoptic segmentation dataset,
+ # e.g. used by Panoptic-DeepLab
+ register_coco_panoptic(
+ prefix,
+ _get_builtin_metadata("coco_panoptic_standard"),
+ image_root,
+ os.path.join(root, panoptic_root),
+ os.path.join(root, panoptic_json),
+ instances_json,
+ )
+
+
+# ==== Predefined datasets and splits for LVIS ==========
+
+
+_PREDEFINED_SPLITS_LVIS = {
+ "lvis_v1": {
+ "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"),
+ "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"),
+ "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"),
+ "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"),
+ },
+ "lvis_v0.5": {
+ "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"),
+ "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"),
+ "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"),
+ "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"),
+ },
+ "lvis_v0.5_cocofied": {
+ "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"),
+ "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"),
+ },
+}
+
+
+def register_all_lvis(root):
+ for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
+ for key, (image_root, json_file) in splits_per_dataset.items():
+ register_lvis_instances(
+ key,
+ get_lvis_instances_meta(dataset_name),
+ os.path.join(root, json_file) if "://" not in json_file else json_file,
+ os.path.join(root, image_root),
+ )
+
+
+# ==== Predefined splits for raw cityscapes images ===========
+_RAW_CITYSCAPES_SPLITS = {
+ "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"),
+ "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"),
+ "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"),
+}
+
+
+def register_all_cityscapes(root):
+ for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
+ meta = _get_builtin_metadata("cityscapes")
+ image_dir = os.path.join(root, image_dir)
+ gt_dir = os.path.join(root, gt_dir)
+
+ inst_key = key.format(task="instance_seg")
+ DatasetCatalog.register(
+ inst_key,
+ lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
+ x, y, from_json=True, to_polygons=True
+ ),
+ )
+ MetadataCatalog.get(inst_key).set(
+ image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
+ )
+
+ sem_key = key.format(task="sem_seg")
+ DatasetCatalog.register(
+ sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
+ )
+ MetadataCatalog.get(sem_key).set(
+ image_dir=image_dir,
+ gt_dir=gt_dir,
+ evaluator_type="cityscapes_sem_seg",
+ ignore_label=255,
+ **meta,
+ )
+
+
+# ==== Predefined splits for PASCAL VOC ===========
+def register_all_pascal_voc(root):
+ SPLITS = [
+ ("voc_2007_trainval", "VOC2007", "trainval"),
+ ("voc_2007_train", "VOC2007", "train"),
+ ("voc_2007_val", "VOC2007", "val"),
+ ("voc_2007_test", "VOC2007", "test"),
+ ("voc_2012_trainval", "VOC2012", "trainval"),
+ ("voc_2012_train", "VOC2012", "train"),
+ ("voc_2012_val", "VOC2012", "val"),
+ ]
+ for name, dirname, split in SPLITS:
+ year = 2007 if "2007" in name else 2012
+ register_pascal_voc(name, os.path.join(root, dirname), split, year)
+ MetadataCatalog.get(name).evaluator_type = "pascal_voc"
+
+
+def register_all_ade20k(root):
+ root = os.path.join(root, "ADEChallengeData2016")
+ for name, dirname in [("train", "training"), ("val", "validation")]:
+ image_dir = os.path.join(root, "images", dirname)
+ gt_dir = os.path.join(root, "annotations_detectron2", dirname)
+ name = f"ade20k_sem_seg_{name}"
+ DatasetCatalog.register(
+ name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
+ )
+ MetadataCatalog.get(name).set(
+ stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:],
+ image_root=image_dir,
+ sem_seg_root=gt_dir,
+ evaluator_type="sem_seg",
+ ignore_label=255,
+ )
+
+
+# True for open source;
+# Internally at fb, we register them elsewhere
+if __name__.endswith(".builtin"):
+ # Assume pre-defined datasets live in `./datasets`.
+ _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
+ register_all_coco(_root)
+ register_all_lvis(_root)
+ register_all_cityscapes(_root)
+ register_all_cityscapes_panoptic(_root)
+ register_all_pascal_voc(_root)
+ register_all_ade20k(_root)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin_meta.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin_meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..63c7a1a31b31dd89b82011effee26471faccacf5
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/builtin_meta.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+Note:
+For your custom dataset, there is no need to hard-code metadata anywhere in the code.
+For example, for COCO-format dataset, metadata will be obtained automatically
+when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways
+during loading.
+
+However, we hard-coded metadata for a few common dataset here.
+The only goal is to allow users who don't have these dataset to use pre-trained models.
+Users don't have to download a COCO json (which contains metadata), in order to visualize a
+COCO model (with correct class names and colors).
+"""
+
+
+# All coco categories, together with their nice-looking visualization colors
+# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
+COCO_CATEGORIES = [
+ {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
+ {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
+ {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
+ {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
+ {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
+ {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
+ {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
+ {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
+ {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
+ {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
+ {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
+ {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
+ {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
+ {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
+ {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
+ {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
+ {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
+ {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
+ {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
+ {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
+ {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
+ {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
+ {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
+ {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
+ {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
+ {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
+ {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
+ {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
+ {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
+ {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
+ {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
+ {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
+ {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
+ {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
+ {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
+ {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
+ {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
+ {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
+ {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
+ {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
+ {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
+ {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
+ {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
+ {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
+ {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
+ {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
+ {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
+ {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
+ {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
+ {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
+ {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
+ {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
+ {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
+ {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
+ {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
+ {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
+ {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
+ {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
+ {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
+ {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
+ {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
+ {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
+ {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
+ {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
+ {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
+ {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
+ {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
+ {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
+ {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
+ {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
+ {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
+ {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
+ {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
+ {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
+ {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
+ {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
+ {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
+ {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
+ {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
+ {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
+ {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
+ {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
+ {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
+ {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
+ {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
+ {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
+ {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
+ {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
+ {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
+ {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
+ {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
+ {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
+ {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
+ {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
+ {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
+ {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
+ {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
+ {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
+ {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
+ {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
+ {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
+ {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
+ {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
+ {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
+ {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
+ {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
+ {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
+ {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
+ {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
+ {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
+ {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
+ {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
+ {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
+ {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
+ {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
+ {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
+ {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
+ {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
+ {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
+ {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
+ {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
+ {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
+ {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
+ {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
+ {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
+ {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
+ {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
+ {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
+ {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
+ {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
+ {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
+ {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
+ {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
+]
+
+# fmt: off
+COCO_PERSON_KEYPOINT_NAMES = (
+ "nose",
+ "left_eye", "right_eye",
+ "left_ear", "right_ear",
+ "left_shoulder", "right_shoulder",
+ "left_elbow", "right_elbow",
+ "left_wrist", "right_wrist",
+ "left_hip", "right_hip",
+ "left_knee", "right_knee",
+ "left_ankle", "right_ankle",
+)
+# fmt: on
+
+# Pairs of keypoints that should be exchanged under horizontal flipping
+COCO_PERSON_KEYPOINT_FLIP_MAP = (
+ ("left_eye", "right_eye"),
+ ("left_ear", "right_ear"),
+ ("left_shoulder", "right_shoulder"),
+ ("left_elbow", "right_elbow"),
+ ("left_wrist", "right_wrist"),
+ ("left_hip", "right_hip"),
+ ("left_knee", "right_knee"),
+ ("left_ankle", "right_ankle"),
+)
+
+# rules for pairs of keypoints to draw a line between, and the line color to use.
+KEYPOINT_CONNECTION_RULES = [
+ # face
+ ("left_ear", "left_eye", (102, 204, 255)),
+ ("right_ear", "right_eye", (51, 153, 255)),
+ ("left_eye", "nose", (102, 0, 204)),
+ ("nose", "right_eye", (51, 102, 255)),
+ # upper-body
+ ("left_shoulder", "right_shoulder", (255, 128, 0)),
+ ("left_shoulder", "left_elbow", (153, 255, 204)),
+ ("right_shoulder", "right_elbow", (128, 229, 255)),
+ ("left_elbow", "left_wrist", (153, 255, 153)),
+ ("right_elbow", "right_wrist", (102, 255, 224)),
+ # lower-body
+ ("left_hip", "right_hip", (255, 102, 0)),
+ ("left_hip", "left_knee", (255, 255, 77)),
+ ("right_hip", "right_knee", (153, 255, 204)),
+ ("left_knee", "left_ankle", (191, 255, 128)),
+ ("right_knee", "right_ankle", (255, 195, 77)),
+]
+
+# All Cityscapes categories, together with their nice-looking visualization colors
+# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa
+CITYSCAPES_CATEGORIES = [
+ {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"},
+ {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"},
+ {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"},
+ {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"},
+ {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"},
+ {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"},
+ {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"},
+ {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"},
+ {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"},
+ {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"},
+ {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"},
+ {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"},
+ {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"},
+ {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"},
+ {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"},
+ {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"},
+ {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"},
+ {"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"},
+ {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"},
+]
+
+# fmt: off
+ADE20K_SEM_SEG_CATEGORIES = [
+ "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa
+]
+# After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore
+# fmt: on
+
+
+def _get_coco_instances_meta():
+ thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1]
+ thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
+ assert len(thing_ids) == 80, len(thing_ids)
+ # Mapping from the incontiguous COCO category id to an id in [0, 79]
+ thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
+ thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
+ ret = {
+ "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
+ "thing_classes": thing_classes,
+ "thing_colors": thing_colors,
+ }
+ return ret
+
+
+def _get_coco_panoptic_separated_meta():
+ """
+ Returns metadata for "separated" version of the panoptic segmentation dataset.
+ """
+ stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0]
+ assert len(stuff_ids) == 53, len(stuff_ids)
+
+ # For semantic segmentation, this mapping maps from contiguous stuff id
+ # (in [0, 53], used in models) to ids in the dataset (used for processing results)
+ # The id 0 is mapped to an extra category "thing".
+ stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
+ # When converting COCO panoptic annotations to semantic annotations
+ # We label the "thing" category to 0
+ stuff_dataset_id_to_contiguous_id[0] = 0
+
+ # 54 names for COCO stuff categories (including "things")
+ stuff_classes = ["things"] + [
+ k["name"].replace("-other", "").replace("-merged", "")
+ for k in COCO_CATEGORIES
+ if k["isthing"] == 0
+ ]
+
+ # NOTE: I randomly picked a color for things
+ stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0]
+ ret = {
+ "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
+ "stuff_classes": stuff_classes,
+ "stuff_colors": stuff_colors,
+ }
+ ret.update(_get_coco_instances_meta())
+ return ret
+
+
+def _get_builtin_metadata(dataset_name):
+ if dataset_name == "coco":
+ return _get_coco_instances_meta()
+ if dataset_name == "coco_panoptic_separated":
+ return _get_coco_panoptic_separated_meta()
+ elif dataset_name == "coco_panoptic_standard":
+ meta = {}
+ # The following metadata maps contiguous id from [0, #thing categories +
+ # #stuff categories) to their names and colors. We have to replica of the
+ # same name and color under "thing_*" and "stuff_*" because the current
+ # visualization function in D2 handles thing and class classes differently
+ # due to some heuristic used in Panoptic FPN. We keep the same naming to
+ # enable reusing existing visualization functions.
+ thing_classes = [k["name"] for k in COCO_CATEGORIES]
+ thing_colors = [k["color"] for k in COCO_CATEGORIES]
+ stuff_classes = [k["name"] for k in COCO_CATEGORIES]
+ stuff_colors = [k["color"] for k in COCO_CATEGORIES]
+
+ meta["thing_classes"] = thing_classes
+ meta["thing_colors"] = thing_colors
+ meta["stuff_classes"] = stuff_classes
+ meta["stuff_colors"] = stuff_colors
+
+ # Convert category id for training:
+ # category id: like semantic segmentation, it is the class id for each
+ # pixel. Since there are some classes not used in evaluation, the category
+ # id is not always contiguous and thus we have two set of category ids:
+ # - original category id: category id in the original dataset, mainly
+ # used for evaluation.
+ # - contiguous category id: [0, #classes), in order to train the linear
+ # softmax classifier.
+ thing_dataset_id_to_contiguous_id = {}
+ stuff_dataset_id_to_contiguous_id = {}
+
+ for i, cat in enumerate(COCO_CATEGORIES):
+ if cat["isthing"]:
+ thing_dataset_id_to_contiguous_id[cat["id"]] = i
+ else:
+ stuff_dataset_id_to_contiguous_id[cat["id"]] = i
+
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
+
+ return meta
+ elif dataset_name == "coco_person":
+ return {
+ "thing_classes": ["person"],
+ "keypoint_names": COCO_PERSON_KEYPOINT_NAMES,
+ "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP,
+ "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES,
+ }
+ elif dataset_name == "cityscapes":
+ # fmt: off
+ CITYSCAPES_THING_CLASSES = [
+ "person", "rider", "car", "truck",
+ "bus", "train", "motorcycle", "bicycle",
+ ]
+ CITYSCAPES_STUFF_CLASSES = [
+ "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
+ "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
+ "truck", "bus", "train", "motorcycle", "bicycle",
+ ]
+ # fmt: on
+ return {
+ "thing_classes": CITYSCAPES_THING_CLASSES,
+ "stuff_classes": CITYSCAPES_STUFF_CLASSES,
+ }
+ raise KeyError("No built-in metadata for dataset {}".format(dataset_name))
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e84a5bdb3d4e410d8eef4b80a5d4c099a180104
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes.py
@@ -0,0 +1,329 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import functools
+import json
+import logging
+import multiprocessing as mp
+import numpy as np
+import os
+from itertools import chain
+import pycocotools.mask as mask_util
+from PIL import Image
+
+from detectron2.structures import BoxMode
+from detectron2.utils.comm import get_world_size
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import setup_logger
+
+try:
+ import cv2 # noqa
+except ImportError:
+ # OpenCV is an optional dependency at the moment
+ pass
+
+
+logger = logging.getLogger(__name__)
+
+
+def _get_cityscapes_files(image_dir, gt_dir):
+ files = []
+ # scan through the directory
+ cities = PathManager.ls(image_dir)
+ logger.info(f"{len(cities)} cities found in '{image_dir}'.")
+ for city in cities:
+ city_img_dir = os.path.join(image_dir, city)
+ city_gt_dir = os.path.join(gt_dir, city)
+ for basename in PathManager.ls(city_img_dir):
+ image_file = os.path.join(city_img_dir, basename)
+
+ suffix = "leftImg8bit.png"
+ assert basename.endswith(suffix), basename
+ basename = basename[: -len(suffix)]
+
+ instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
+ label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
+ json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
+
+ files.append((image_file, instance_file, label_file, json_file))
+ assert len(files), "No images found in {}".format(image_dir)
+ for f in files[0]:
+ assert PathManager.isfile(f), f
+ return files
+
+
+def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
+ from_json (bool): whether to read annotations from the raw json file or the png files.
+ to_polygons (bool): whether to represent the segmentation as polygons
+ (COCO's format) instead of masks (cityscapes's format).
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+ `Using Custom Datasets `_ )
+ """
+ if from_json:
+ assert to_polygons, (
+ "Cityscapes's json annotations are in polygon format. "
+ "Converting to mask format is not supported now."
+ )
+ files = _get_cityscapes_files(image_dir, gt_dir)
+
+ logger.info("Preprocessing cityscapes annotations ...")
+ # This is still not fast: all workers will execute duplicate works and will
+ # take up to 10m on a 8GPU server.
+ pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
+
+ ret = pool.map(
+ functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
+ files,
+ )
+ logger.info("Loaded {} images from {}".format(len(ret), image_dir))
+
+ # Map cityscape ids to contiguous ids
+ from cityscapesscripts.helpers.labels import labels
+
+ labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
+ dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
+ for dict_per_image in ret:
+ for anno in dict_per_image["annotations"]:
+ anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
+ return ret
+
+
+def load_cityscapes_semantic(image_dir, gt_dir):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
+
+ Returns:
+ list[dict]: a list of dict, each has "file_name" and
+ "sem_seg_file_name".
+ """
+ ret = []
+ # gt_dir is small and contain many small files. make sense to fetch to local first
+ gt_dir = PathManager.get_local_path(gt_dir)
+ for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
+ label_file = label_file.replace("labelIds", "labelTrainIds")
+
+ with PathManager.open(json_file, "r") as f:
+ jsonobj = json.load(f)
+ ret.append(
+ {
+ "file_name": image_file,
+ "sem_seg_file_name": label_file,
+ "height": jsonobj["imgHeight"],
+ "width": jsonobj["imgWidth"],
+ }
+ )
+ assert len(ret), f"No images found in {image_dir}!"
+ assert PathManager.isfile(
+ ret[0]["sem_seg_file_name"]
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
+ return ret
+
+
+def _cityscapes_files_to_dict(files, from_json, to_polygons):
+ """
+ Parse cityscapes annotation files to a instance segmentation dataset dict.
+
+ Args:
+ files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
+ from_json (bool): whether to read annotations from the raw json file or the png files.
+ to_polygons (bool): whether to represent the segmentation as polygons
+ (COCO's format) instead of masks (cityscapes's format).
+
+ Returns:
+ A dict in Detectron2 Dataset format.
+ """
+ from cityscapesscripts.helpers.labels import id2label, name2label
+
+ image_file, instance_id_file, _, json_file = files
+
+ annos = []
+
+ if from_json:
+ from shapely.geometry import MultiPolygon, Polygon
+
+ with PathManager.open(json_file, "r") as f:
+ jsonobj = json.load(f)
+ ret = {
+ "file_name": image_file,
+ "image_id": os.path.basename(image_file),
+ "height": jsonobj["imgHeight"],
+ "width": jsonobj["imgWidth"],
+ }
+
+ # `polygons_union` contains the union of all valid polygons.
+ polygons_union = Polygon()
+
+ # CityscapesScripts draw the polygons in sequential order
+ # and each polygon *overwrites* existing ones. See
+ # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
+ # We use reverse order, and each polygon *avoids* early ones.
+ # This will resolve the ploygon overlaps in the same way as CityscapesScripts.
+ for obj in jsonobj["objects"][::-1]:
+ if "deleted" in obj: # cityscapes data format specific
+ continue
+ label_name = obj["label"]
+
+ try:
+ label = name2label[label_name]
+ except KeyError:
+ if label_name.endswith("group"): # crowd area
+ label = name2label[label_name[: -len("group")]]
+ else:
+ raise
+ if label.id < 0: # cityscapes data format
+ continue
+
+ # Cityscapes's raw annotations uses integer coordinates
+ # Therefore +0.5 here
+ poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
+ # CityscapesScript uses PIL.ImageDraw.polygon to rasterize
+ # polygons for evaluation. This function operates in integer space
+ # and draws each pixel whose center falls into the polygon.
+ # Therefore it draws a polygon which is 0.5 "fatter" in expectation.
+ # We therefore dilate the input polygon by 0.5 as our input.
+ poly = Polygon(poly_coord).buffer(0.5, resolution=4)
+
+ if not label.hasInstances or label.ignoreInEval:
+ # even if we won't store the polygon it still contributes to overlaps resolution
+ polygons_union = polygons_union.union(poly)
+ continue
+
+ # Take non-overlapping part of the polygon
+ poly_wo_overlaps = poly.difference(polygons_union)
+ if poly_wo_overlaps.is_empty:
+ continue
+ polygons_union = polygons_union.union(poly)
+
+ anno = {}
+ anno["iscrowd"] = label_name.endswith("group")
+ anno["category_id"] = label.id
+
+ if isinstance(poly_wo_overlaps, Polygon):
+ poly_list = [poly_wo_overlaps]
+ elif isinstance(poly_wo_overlaps, MultiPolygon):
+ poly_list = poly_wo_overlaps.geoms
+ else:
+ raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
+
+ poly_coord = []
+ for poly_el in poly_list:
+ # COCO API can work only with exterior boundaries now, hence we store only them.
+ # TODO: store both exterior and interior boundaries once other parts of the
+ # codebase support holes in polygons.
+ poly_coord.append(list(chain(*poly_el.exterior.coords)))
+ anno["segmentation"] = poly_coord
+ (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
+
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
+
+ annos.append(anno)
+ else:
+ # See also the official annotation parsing scripts at
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
+ with PathManager.open(instance_id_file, "rb") as f:
+ inst_image = np.asarray(Image.open(f), order="F")
+ # ids < 24 are stuff labels (filtering them first is about 5% faster)
+ flattened_ids = np.unique(inst_image[inst_image >= 24])
+
+ ret = {
+ "file_name": image_file,
+ "image_id": os.path.basename(image_file),
+ "height": inst_image.shape[0],
+ "width": inst_image.shape[1],
+ }
+
+ for instance_id in flattened_ids:
+ # For non-crowd annotations, instance_id // 1000 is the label_id
+ # Crowd annotations have <1000 instance ids
+ label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
+ label = id2label[label_id]
+ if not label.hasInstances or label.ignoreInEval:
+ continue
+
+ anno = {}
+ anno["iscrowd"] = instance_id < 1000
+ anno["category_id"] = label.id
+
+ mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
+
+ inds = np.nonzero(mask)
+ ymin, ymax = inds[0].min(), inds[0].max()
+ xmin, xmax = inds[1].min(), inds[1].max()
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
+ if xmax <= xmin or ymax <= ymin:
+ continue
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
+ if to_polygons:
+ # This conversion comes from D4809743 and D5171122,
+ # when Mask-RCNN was first developed.
+ contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
+ -2
+ ]
+ polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
+ # opencv's can produce invalid polygons
+ if len(polygons) == 0:
+ continue
+ anno["segmentation"] = polygons
+ else:
+ anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
+ annos.append(anno)
+ ret["annotations"] = annos
+ return ret
+
+
+if __name__ == "__main__":
+ """
+ Test the cityscapes dataset loader.
+
+ Usage:
+ python -m detectron2.data.datasets.cityscapes \
+ cityscapes/leftImg8bit/train cityscapes/gtFine/train
+ """
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("image_dir")
+ parser.add_argument("gt_dir")
+ parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
+ args = parser.parse_args()
+ from detectron2.data.catalog import Metadata
+ from detectron2.utils.visualizer import Visualizer
+ from cityscapesscripts.helpers.labels import labels
+
+ logger = setup_logger(name=__name__)
+
+ dirname = "cityscapes-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+
+ if args.type == "instance":
+ dicts = load_cityscapes_instances(
+ args.image_dir, args.gt_dir, from_json=True, to_polygons=True
+ )
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval]
+ meta = Metadata().set(thing_classes=thing_classes)
+
+ else:
+ dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir)
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ stuff_classes = [k.name for k in labels if k.trainId != 255]
+ stuff_colors = [k.color for k in labels if k.trainId != 255]
+ meta = Metadata().set(stuff_classes=stuff_classes, stuff_colors=stuff_colors)
+
+ for d in dicts:
+ img = np.array(Image.open(PathManager.open(d["file_name"], "rb")))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ # cv2.imshow("a", vis.get_image()[:, :, ::-1])
+ # cv2.waitKey()
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes_panoptic.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes_panoptic.py
new file mode 100644
index 0000000000000000000000000000000000000000..48c136f1623261b079591065fec7c7fc38165076
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/cityscapes_panoptic.py
@@ -0,0 +1,187 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import json
+import logging
+import os
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
+from detectron2.utils.file_io import PathManager
+
+"""
+This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
+"""
+
+
+logger = logging.getLogger(__name__)
+
+
+def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
+ files = []
+ # scan through the directory
+ cities = PathManager.ls(image_dir)
+ logger.info(f"{len(cities)} cities found in '{image_dir}'.")
+ image_dict = {}
+ for city in cities:
+ city_img_dir = os.path.join(image_dir, city)
+ for basename in PathManager.ls(city_img_dir):
+ image_file = os.path.join(city_img_dir, basename)
+
+ suffix = "_leftImg8bit.png"
+ assert basename.endswith(suffix), basename
+ basename = os.path.basename(basename)[: -len(suffix)]
+
+ image_dict[basename] = image_file
+
+ for ann in json_info["annotations"]:
+ image_file = image_dict.get(ann["image_id"], None)
+ assert image_file is not None, "No image {} found for annotation {}".format(
+ ann["image_id"], ann["file_name"]
+ )
+ label_file = os.path.join(gt_dir, ann["file_name"])
+ segments_info = ann["segments_info"]
+
+ files.append((image_file, label_file, segments_info))
+
+ assert len(files), "No images found in {}".format(image_dir)
+ assert PathManager.isfile(files[0][0]), files[0][0]
+ assert PathManager.isfile(files[0][1]), files[0][1]
+ return files
+
+
+def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
+ gt_dir (str): path to the raw annotations. e.g.,
+ "~/cityscapes/gtFine/cityscapes_panoptic_train".
+ gt_json (str): path to the json file. e.g.,
+ "~/cityscapes/gtFine/cityscapes_panoptic_train.json".
+ meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
+ and "stuff_dataset_id_to_contiguous_id" to map category ids to
+ contiguous ids for training.
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+ `Using Custom Datasets `_ )
+ """
+
+ def _convert_category_id(segment_info, meta):
+ if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
+ segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ else:
+ segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ return segment_info
+
+ assert os.path.exists(
+ gt_json
+ ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa
+ with open(gt_json) as f:
+ json_info = json.load(f)
+ files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info)
+ ret = []
+ for image_file, label_file, segments_info in files:
+ sem_label_file = (
+ image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
+ )
+ segments_info = [_convert_category_id(x, meta) for x in segments_info]
+ ret.append(
+ {
+ "file_name": image_file,
+ "image_id": "_".join(
+ os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
+ ),
+ "sem_seg_file_name": sem_label_file,
+ "pan_seg_file_name": label_file,
+ "segments_info": segments_info,
+ }
+ )
+ assert len(ret), f"No images found in {image_dir}!"
+ assert PathManager.isfile(
+ ret[0]["sem_seg_file_name"]
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
+ assert PathManager.isfile(
+ ret[0]["pan_seg_file_name"]
+ ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py" # noqa
+ return ret
+
+
+_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
+ "cityscapes_fine_panoptic_train": (
+ "cityscapes/leftImg8bit/train",
+ "cityscapes/gtFine/cityscapes_panoptic_train",
+ "cityscapes/gtFine/cityscapes_panoptic_train.json",
+ ),
+ "cityscapes_fine_panoptic_val": (
+ "cityscapes/leftImg8bit/val",
+ "cityscapes/gtFine/cityscapes_panoptic_val",
+ "cityscapes/gtFine/cityscapes_panoptic_val.json",
+ ),
+ # "cityscapes_fine_panoptic_test": not supported yet
+}
+
+
+def register_all_cityscapes_panoptic(root):
+ meta = {}
+ # The following metadata maps contiguous id from [0, #thing categories +
+ # #stuff categories) to their names and colors. We have to replica of the
+ # same name and color under "thing_*" and "stuff_*" because the current
+ # visualization function in D2 handles thing and class classes differently
+ # due to some heuristic used in Panoptic FPN. We keep the same naming to
+ # enable reusing existing visualization functions.
+ thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
+ thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
+ stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
+ stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
+
+ meta["thing_classes"] = thing_classes
+ meta["thing_colors"] = thing_colors
+ meta["stuff_classes"] = stuff_classes
+ meta["stuff_colors"] = stuff_colors
+
+ # There are three types of ids in cityscapes panoptic segmentation:
+ # (1) category id: like semantic segmentation, it is the class id for each
+ # pixel. Since there are some classes not used in evaluation, the category
+ # id is not always contiguous and thus we have two set of category ids:
+ # - original category id: category id in the original dataset, mainly
+ # used for evaluation.
+ # - contiguous category id: [0, #classes), in order to train the classifier
+ # (2) instance id: this id is used to differentiate different instances from
+ # the same category. For "stuff" classes, the instance id is always 0; for
+ # "thing" classes, the instance id starts from 1 and 0 is reserved for
+ # ignored instances (e.g. crowd annotation).
+ # (3) panoptic id: this is the compact id that encode both category and
+ # instance id by: category_id * 1000 + instance_id.
+ thing_dataset_id_to_contiguous_id = {}
+ stuff_dataset_id_to_contiguous_id = {}
+
+ for k in CITYSCAPES_CATEGORIES:
+ if k["isthing"] == 1:
+ thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
+ else:
+ stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
+
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
+
+ for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
+ image_dir = os.path.join(root, image_dir)
+ gt_dir = os.path.join(root, gt_dir)
+ gt_json = os.path.join(root, gt_json)
+
+ DatasetCatalog.register(
+ key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
+ )
+ MetadataCatalog.get(key).set(
+ panoptic_root=gt_dir,
+ image_root=image_dir,
+ panoptic_json=gt_json,
+ gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
+ evaluator_type="cityscapes_panoptic_seg",
+ ignore_label=255,
+ label_divisor=1000,
+ **meta,
+ )
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed4f7ccb20efa3b54c719783e279c381ca5d8587
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco.py
@@ -0,0 +1,539 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import datetime
+import io
+import json
+import logging
+import numpy as np
+import os
+import shutil
+import pycocotools.mask as mask_util
+from fvcore.common.timer import Timer
+from iopath.common.file_io import file_lock
+from PIL import Image
+
+from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
+from detectron2.utils.file_io import PathManager
+
+from .. import DatasetCatalog, MetadataCatalog
+
+"""
+This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
+"""
+
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"]
+
+
+def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
+ """
+ Load a json file with COCO's instances annotation format.
+ Currently supports instance detection, instance segmentation,
+ and person keypoints annotations.
+
+ Args:
+ json_file (str): full path to the json file in COCO instances annotation format.
+ image_root (str or path-like): the directory where the images in this json file exists.
+ dataset_name (str or None): the name of the dataset (e.g., coco_2017_train).
+ When provided, this function will also do the following:
+
+ * Put "thing_classes" into the metadata associated with this dataset.
+ * Map the category ids into a contiguous range (needed by standard dataset format),
+ and add "thing_dataset_id_to_contiguous_id" to the metadata associated
+ with this dataset.
+
+ This option should usually be provided, unless users need to load
+ the original json content and apply more processing manually.
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
+ loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
+ "category_id", "segmentation"). The values for these keys will be returned as-is.
+ For example, the densepose annotations are loaded in this way.
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See
+ `Using Custom Datasets `_ ) when `dataset_name` is not None.
+ If `dataset_name` is None, the returned `category_ids` may be
+ incontiguous and may not conform to the Detectron2 standard format.
+
+ Notes:
+ 1. This function does not read the image files.
+ The results do not have the "image" field.
+ """
+ from pycocotools.coco import COCO
+
+ timer = Timer()
+ json_file = PathManager.get_local_path(json_file)
+ with contextlib.redirect_stdout(io.StringIO()):
+ coco_api = COCO(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
+
+ id_map = None
+ if dataset_name is not None:
+ meta = MetadataCatalog.get(dataset_name)
+ cat_ids = sorted(coco_api.getCatIds())
+ cats = coco_api.loadCats(cat_ids)
+ # The categories in a custom json file may not be sorted.
+ thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
+ meta.thing_classes = thing_classes
+
+ # In COCO, certain category ids are artificially removed,
+ # and by convention they are always ignored.
+ # We deal with COCO's id issue and translate
+ # the category ids to contiguous ids in [0, 80).
+
+ # It works by looking at the "categories" field in the json, therefore
+ # if users' own json also have incontiguous ids, we'll
+ # apply this mapping as well but print a warning.
+ if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
+ if "coco" not in dataset_name:
+ logger.warning(
+ """
+Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
+"""
+ )
+ id_map = {v: i for i, v in enumerate(cat_ids)}
+ meta.thing_dataset_id_to_contiguous_id = id_map
+
+ # sort indices for reproducible results
+ img_ids = sorted(coco_api.imgs.keys())
+ # imgs is a list of dicts, each looks something like:
+ # {'license': 4,
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
+ # 'height': 427,
+ # 'width': 640,
+ # 'date_captured': '2013-11-17 05:57:24',
+ # 'id': 1268}
+ imgs = coco_api.loadImgs(img_ids)
+ # anns is a list[list[dict]], where each dict is an annotation
+ # record for an object. The inner list enumerates the objects in an image
+ # and the outer list enumerates over images. Example of anns[0]:
+ # [{'segmentation': [[192.81,
+ # 247.09,
+ # ...
+ # 219.03,
+ # 249.06]],
+ # 'area': 1035.749,
+ # 'iscrowd': 0,
+ # 'image_id': 1268,
+ # 'bbox': [192.81, 224.8, 74.73, 33.43],
+ # 'category_id': 16,
+ # 'id': 42986},
+ # ...]
+ anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
+ total_num_valid_anns = sum([len(x) for x in anns])
+ total_num_anns = len(coco_api.anns)
+ if total_num_valid_anns < total_num_anns:
+ logger.warning(
+ f"{json_file} contains {total_num_anns} annotations, but only "
+ f"{total_num_valid_anns} of them match to images in the file."
+ )
+
+ if "minival" not in json_file:
+ # The popular valminusminival & minival annotations for COCO2014 contain this bug.
+ # However the ratio of buggy annotations there is tiny and does not affect accuracy.
+ # Therefore we explicitly white-list them.
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
+ json_file
+ )
+
+ imgs_anns = list(zip(imgs, anns))
+ logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
+
+ dataset_dicts = []
+
+ ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
+
+ num_instances_without_valid_segmentation = 0
+
+ for (img_dict, anno_dict_list) in imgs_anns:
+ record = {}
+ record["file_name"] = os.path.join(image_root, img_dict["file_name"])
+ record["height"] = img_dict["height"]
+ record["width"] = img_dict["width"]
+ image_id = record["image_id"] = img_dict["id"]
+
+ objs = []
+ for anno in anno_dict_list:
+ # Check that the image_id in this annotation is the same as
+ # the image_id we're looking at.
+ # This fails only when the data parsing logic or the annotation file is buggy.
+
+ # The original COCO valminusminival2014 & minival2014 annotation files
+ # actually contains bugs that, together with certain ways of using COCO API,
+ # can trigger this assertion.
+ assert anno["image_id"] == image_id
+
+ assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
+
+ obj = {key: anno[key] for key in ann_keys if key in anno}
+ if "bbox" in obj and len(obj["bbox"]) == 0:
+ raise ValueError(
+ f"One annotation of image {image_id} contains empty 'bbox' value! "
+ "This json does not have valid COCO format."
+ )
+
+ segm = anno.get("segmentation", None)
+ if segm: # either list[list[float]] or dict(RLE)
+ if isinstance(segm, dict):
+ if isinstance(segm["counts"], list):
+ # convert to compressed RLE
+ segm = mask_util.frPyObjects(segm, *segm["size"])
+ else:
+ # filter out invalid polygons (< 3 points)
+ segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
+ if len(segm) == 0:
+ num_instances_without_valid_segmentation += 1
+ continue # ignore this instance
+ obj["segmentation"] = segm
+
+ keypts = anno.get("keypoints", None)
+ if keypts: # list[int]
+ for idx, v in enumerate(keypts):
+ if idx % 3 != 2:
+ # COCO's segmentation coordinates are floating points in [0, H or W],
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
+ # Therefore we assume the coordinates are "pixel indices" and
+ # add 0.5 to convert to floating point coordinates.
+ keypts[idx] = v + 0.5
+ obj["keypoints"] = keypts
+
+ obj["bbox_mode"] = BoxMode.XYWH_ABS
+ if id_map:
+ annotation_category_id = obj["category_id"]
+ try:
+ obj["category_id"] = id_map[annotation_category_id]
+ except KeyError as e:
+ raise KeyError(
+ f"Encountered category_id={annotation_category_id} "
+ "but this id does not exist in 'categories' of the json file."
+ ) from e
+ objs.append(obj)
+ record["annotations"] = objs
+ dataset_dicts.append(record)
+
+ if num_instances_without_valid_segmentation > 0:
+ logger.warning(
+ "Filtered out {} instances without valid segmentation. ".format(
+ num_instances_without_valid_segmentation
+ )
+ + "There might be issues in your dataset generation process. Please "
+ "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully"
+ )
+ return dataset_dicts
+
+
+def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
+ """
+ Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
+ treated as ground truth annotations and all files under "image_root" with "image_ext" extension
+ as input images. Ground truth and input images are matched using file paths relative to
+ "gt_root" and "image_root" respectively without taking into account file extensions.
+ This works for COCO as well as some other datasets.
+
+ Args:
+ gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
+ annotations are stored as images with integer values in pixels that represent
+ corresponding semantic labels.
+ image_root (str): the directory where the input images are.
+ gt_ext (str): file extension for ground truth annotations.
+ image_ext (str): file extension for input images.
+
+ Returns:
+ list[dict]:
+ a list of dicts in detectron2 standard format without instance-level
+ annotation.
+
+ Notes:
+ 1. This function does not read the image and ground truth files.
+ The results do not have the "image" and "sem_seg" fields.
+ """
+
+ # We match input images with ground truth based on their relative filepaths (without file
+ # extensions) starting from 'image_root' and 'gt_root' respectively.
+ def file2id(folder_path, file_path):
+ # extract relative path starting from `folder_path`
+ image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
+ # remove file extension
+ image_id = os.path.splitext(image_id)[0]
+ return image_id
+
+ input_files = sorted(
+ (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),
+ key=lambda file_path: file2id(image_root, file_path),
+ )
+ gt_files = sorted(
+ (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
+ key=lambda file_path: file2id(gt_root, file_path),
+ )
+
+ assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
+
+ # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
+ if len(input_files) != len(gt_files):
+ logger.warn(
+ "Directory {} and {} has {} and {} files, respectively.".format(
+ image_root, gt_root, len(input_files), len(gt_files)
+ )
+ )
+ input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
+ gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
+ intersect = list(set(input_basenames) & set(gt_basenames))
+ # sort, otherwise each worker may obtain a list[dict] in different order
+ intersect = sorted(intersect)
+ logger.warn("Will use their intersection of {} files.".format(len(intersect)))
+ input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
+ gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
+
+ logger.info(
+ "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root)
+ )
+
+ dataset_dicts = []
+ for (img_path, gt_path) in zip(input_files, gt_files):
+ record = {}
+ record["file_name"] = img_path
+ record["sem_seg_file_name"] = gt_path
+ dataset_dicts.append(record)
+
+ return dataset_dicts
+
+
+def convert_to_coco_dict(dataset_name):
+ """
+ Convert an instance detection/segmentation or keypoint detection dataset
+ in detectron2's standard format into COCO json format.
+
+ Generic dataset description can be found here:
+ https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
+
+ COCO data format description can be found here:
+ http://cocodataset.org/#format-data
+
+ Args:
+ dataset_name (str):
+ name of the source dataset
+ Must be registered in DatastCatalog and in detectron2's standard format.
+ Must have corresponding metadata "thing_classes"
+ Returns:
+ coco_dict: serializable dict in COCO json format
+ """
+
+ dataset_dicts = DatasetCatalog.get(dataset_name)
+ metadata = MetadataCatalog.get(dataset_name)
+
+ # unmap the category mapping ids for COCO
+ if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
+ reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()}
+ reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa
+ else:
+ reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa
+
+ categories = [
+ {"id": reverse_id_mapper(id), "name": name}
+ for id, name in enumerate(metadata.thing_classes)
+ ]
+
+ logger.info("Converting dataset dicts into COCO format")
+ coco_images = []
+ coco_annotations = []
+
+ for image_id, image_dict in enumerate(dataset_dicts):
+ coco_image = {
+ "id": image_dict.get("image_id", image_id),
+ "width": int(image_dict["width"]),
+ "height": int(image_dict["height"]),
+ "file_name": str(image_dict["file_name"]),
+ }
+ coco_images.append(coco_image)
+
+ anns_per_image = image_dict.get("annotations", [])
+ for annotation in anns_per_image:
+ # create a new dict with only COCO fields
+ coco_annotation = {}
+
+ # COCO requirement: XYWH box format for axis-align and XYWHA for rotated
+ bbox = annotation["bbox"]
+ if isinstance(bbox, np.ndarray):
+ if bbox.ndim != 1:
+ raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.")
+ bbox = bbox.tolist()
+ if len(bbox) not in [4, 5]:
+ raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.")
+ from_bbox_mode = annotation["bbox_mode"]
+ to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS
+ bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
+
+ # COCO requirement: instance area
+ if "segmentation" in annotation:
+ # Computing areas for instances by counting the pixels
+ segmentation = annotation["segmentation"]
+ # TODO: check segmentation type: RLE, BinaryMask or Polygon
+ if isinstance(segmentation, list):
+ polygons = PolygonMasks([segmentation])
+ area = polygons.area()[0].item()
+ elif isinstance(segmentation, dict): # RLE
+ area = mask_util.area(segmentation).item()
+ else:
+ raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
+ else:
+ # Computing areas using bounding boxes
+ if to_bbox_mode == BoxMode.XYWH_ABS:
+ bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
+ area = Boxes([bbox_xy]).area()[0].item()
+ else:
+ area = RotatedBoxes([bbox]).area()[0].item()
+
+ if "keypoints" in annotation:
+ keypoints = annotation["keypoints"] # list[int]
+ for idx, v in enumerate(keypoints):
+ if idx % 3 != 2:
+ # COCO's segmentation coordinates are floating points in [0, H or W],
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
+ # For COCO format consistency we substract 0.5
+ # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
+ keypoints[idx] = v - 0.5
+ if "num_keypoints" in annotation:
+ num_keypoints = annotation["num_keypoints"]
+ else:
+ num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
+
+ # COCO requirement:
+ # linking annotations to images
+ # "id" field must start with 1
+ coco_annotation["id"] = len(coco_annotations) + 1
+ coco_annotation["image_id"] = coco_image["id"]
+ coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
+ coco_annotation["area"] = float(area)
+ coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0))
+ coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"]))
+
+ # Add optional fields
+ if "keypoints" in annotation:
+ coco_annotation["keypoints"] = keypoints
+ coco_annotation["num_keypoints"] = num_keypoints
+
+ if "segmentation" in annotation:
+ seg = coco_annotation["segmentation"] = annotation["segmentation"]
+ if isinstance(seg, dict): # RLE
+ counts = seg["counts"]
+ if not isinstance(counts, str):
+ # make it json-serializable
+ seg["counts"] = counts.decode("ascii")
+
+ coco_annotations.append(coco_annotation)
+
+ logger.info(
+ "Conversion finished, "
+ f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}"
+ )
+
+ info = {
+ "date_created": str(datetime.datetime.now()),
+ "description": "Automatically generated COCO json file for Detectron2.",
+ }
+ coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None}
+ if len(coco_annotations) > 0:
+ coco_dict["annotations"] = coco_annotations
+ return coco_dict
+
+
+def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
+ """
+ Converts dataset into COCO format and saves it to a json file.
+ dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
+
+ Args:
+ dataset_name:
+ reference from the config file to the catalogs
+ must be registered in DatasetCatalog and in detectron2's standard format
+ output_file: path of json file that will be saved to
+ allow_cached: if json file is already present then skip conversion
+ """
+
+ # TODO: The dataset or the conversion script *may* change,
+ # a checksum would be useful for validating the cached data
+
+ PathManager.mkdirs(os.path.dirname(output_file))
+ with file_lock(output_file):
+ if PathManager.exists(output_file) and allow_cached:
+ logger.warning(
+ f"Using previously cached COCO format annotations at '{output_file}'. "
+ "You need to clear the cache file if your dataset has been modified."
+ )
+ else:
+ logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
+ coco_dict = convert_to_coco_dict(dataset_name)
+
+ logger.info(f"Caching COCO format annotations at '{output_file}' ...")
+ tmp_file = output_file + ".tmp"
+ with PathManager.open(tmp_file, "w") as f:
+ json.dump(coco_dict, f)
+ shutil.move(tmp_file, output_file)
+
+
+def register_coco_instances(name, metadata, json_file, image_root):
+ """
+ Register a dataset in COCO's json annotation format for
+ instance detection, instance segmentation and keypoint detection.
+ (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
+ `instances*.json` and `person_keypoints*.json` in the dataset).
+
+ This is an example of how to register a new dataset.
+ You can do something similar to this function, to register new datasets.
+
+ Args:
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+ metadata (dict): extra metadata associated with this dataset. You can
+ leave it as an empty dict.
+ json_file (str): path to the json instance annotation file.
+ image_root (str or path-like): directory which contains all the images.
+ """
+ assert isinstance(name, str), name
+ assert isinstance(json_file, (str, os.PathLike)), json_file
+ assert isinstance(image_root, (str, os.PathLike)), image_root
+ # 1. register a function which returns dicts
+ DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
+
+ # 2. Optionally, add metadata about this dataset,
+ # since they might be useful in evaluation, visualization or logging
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
+ )
+
+
+if __name__ == "__main__":
+ """
+ Test the COCO json dataset loader.
+
+ Usage:
+ python -m detectron2.data.datasets.coco \
+ path/to/json path/to/image_root dataset_name
+
+ "dataset_name" can be "coco_2014_minival_100", or other
+ pre-registered ones
+ """
+ from detectron2.utils.logger import setup_logger
+ from detectron2.utils.visualizer import Visualizer
+ import detectron2.data.datasets # noqa # add pre-defined metadata
+ import sys
+
+ logger = setup_logger(name=__name__)
+ assert sys.argv[3] in DatasetCatalog.list()
+ meta = MetadataCatalog.get(sys.argv[3])
+
+ dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ dirname = "coco-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+ for d in dicts:
+ img = np.array(Image.open(d["file_name"]))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco_panoptic.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco_panoptic.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8dae44317b556610d7fed39017e082d7e855956
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco_panoptic.py
@@ -0,0 +1,228 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import json
+import os
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.utils.file_io import PathManager
+
+from .coco import load_coco_json, load_sem_seg
+
+__all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"]
+
+
+def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
+ gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
+ json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+ `Using Custom Datasets `_ )
+ """
+
+ def _convert_category_id(segment_info, meta):
+ if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
+ segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ segment_info["isthing"] = True
+ else:
+ segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ segment_info["isthing"] = False
+ return segment_info
+
+ with PathManager.open(json_file) as f:
+ json_info = json.load(f)
+
+ ret = []
+ for ann in json_info["annotations"]:
+ image_id = int(ann["image_id"])
+ # TODO: currently we assume image and label has the same filename but
+ # different extension, and images have extension ".jpg" for COCO. Need
+ # to make image extension a user-provided argument if we extend this
+ # function to support other COCO-like datasets.
+ image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
+ label_file = os.path.join(gt_dir, ann["file_name"])
+ segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
+ ret.append(
+ {
+ "file_name": image_file,
+ "image_id": image_id,
+ "pan_seg_file_name": label_file,
+ "segments_info": segments_info,
+ }
+ )
+ assert len(ret), f"No images found in {image_dir}!"
+ assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
+ assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
+ return ret
+
+
+def register_coco_panoptic(
+ name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None
+):
+ """
+ Register a "standard" version of COCO panoptic segmentation dataset named `name`.
+ The dictionaries in this registered dataset follows detectron2's standard format.
+ Hence it's called "standard".
+
+ Args:
+ name (str): the name that identifies a dataset,
+ e.g. "coco_2017_train_panoptic"
+ metadata (dict): extra metadata associated with this dataset.
+ image_root (str): directory which contains all the images
+ panoptic_root (str): directory which contains panoptic annotation images in COCO format
+ panoptic_json (str): path to the json panoptic annotation file in COCO format
+ sem_seg_root (none): not used, to be consistent with
+ `register_coco_panoptic_separated`.
+ instances_json (str): path to the json instance annotation file
+ """
+ panoptic_name = name
+ DatasetCatalog.register(
+ panoptic_name,
+ lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),
+ )
+ MetadataCatalog.get(panoptic_name).set(
+ panoptic_root=panoptic_root,
+ image_root=image_root,
+ panoptic_json=panoptic_json,
+ json_file=instances_json,
+ evaluator_type="coco_panoptic_seg",
+ ignore_label=255,
+ label_divisor=1000,
+ **metadata,
+ )
+
+
+def register_coco_panoptic_separated(
+ name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
+):
+ """
+ Register a "separated" version of COCO panoptic segmentation dataset named `name`.
+ The annotations in this registered dataset will contain both instance annotations and
+ semantic annotations, each with its own contiguous ids. Hence it's called "separated".
+
+ It follows the setting used by the PanopticFPN paper:
+
+ 1. The instance annotations directly come from polygons in the COCO
+ instances annotation task, rather than from the masks in the COCO panoptic annotations.
+
+ The two format have small differences:
+ Polygons in the instance annotations may have overlaps.
+ The mask annotations are produced by labeling the overlapped polygons
+ with depth ordering.
+
+ 2. The semantic annotations are converted from panoptic annotations, where
+ all "things" are assigned a semantic id of 0.
+ All semantic categories will therefore have ids in contiguous
+ range [1, #stuff_categories].
+
+ This function will also register a pure semantic segmentation dataset
+ named ``name + '_stuffonly'``.
+
+ Args:
+ name (str): the name that identifies a dataset,
+ e.g. "coco_2017_train_panoptic"
+ metadata (dict): extra metadata associated with this dataset.
+ image_root (str): directory which contains all the images
+ panoptic_root (str): directory which contains panoptic annotation images
+ panoptic_json (str): path to the json panoptic annotation file
+ sem_seg_root (str): directory which contains all the ground truth segmentation annotations.
+ instances_json (str): path to the json instance annotation file
+ """
+ panoptic_name = name + "_separated"
+ DatasetCatalog.register(
+ panoptic_name,
+ lambda: merge_to_panoptic(
+ load_coco_json(instances_json, image_root, panoptic_name),
+ load_sem_seg(sem_seg_root, image_root),
+ ),
+ )
+ MetadataCatalog.get(panoptic_name).set(
+ panoptic_root=panoptic_root,
+ image_root=image_root,
+ panoptic_json=panoptic_json,
+ sem_seg_root=sem_seg_root,
+ json_file=instances_json, # TODO rename
+ evaluator_type="coco_panoptic_seg",
+ ignore_label=255,
+ **metadata,
+ )
+
+ semantic_name = name + "_stuffonly"
+ DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))
+ MetadataCatalog.get(semantic_name).set(
+ sem_seg_root=sem_seg_root,
+ image_root=image_root,
+ evaluator_type="sem_seg",
+ ignore_label=255,
+ **metadata,
+ )
+
+
+def merge_to_panoptic(detection_dicts, sem_seg_dicts):
+ """
+ Create dataset dicts for panoptic segmentation, by
+ merging two dicts using "file_name" field to match their entries.
+
+ Args:
+ detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation.
+ sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.
+
+ Returns:
+ list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in
+ both detection_dicts and sem_seg_dicts that correspond to the same image.
+ The function assumes that the same key in different dicts has the same value.
+ """
+ results = []
+ sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts}
+ assert len(sem_seg_file_to_entry) > 0
+
+ for det_dict in detection_dicts:
+ dic = copy.copy(det_dict)
+ dic.update(sem_seg_file_to_entry[dic["file_name"]])
+ results.append(dic)
+ return results
+
+
+if __name__ == "__main__":
+ """
+ Test the COCO panoptic dataset loader.
+
+ Usage:
+ python -m detectron2.data.datasets.coco_panoptic \
+ path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10
+
+ "dataset_name" can be "coco_2017_train_panoptic", or other
+ pre-registered ones
+ """
+ from detectron2.utils.logger import setup_logger
+ from detectron2.utils.visualizer import Visualizer
+ import detectron2.data.datasets # noqa # add pre-defined metadata
+ import sys
+ from PIL import Image
+ import numpy as np
+
+ logger = setup_logger(name=__name__)
+ assert sys.argv[4] in DatasetCatalog.list()
+ meta = MetadataCatalog.get(sys.argv[4])
+
+ dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict())
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ dirname = "coco-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+ num_imgs_to_vis = int(sys.argv[5])
+ for i, d in enumerate(dicts):
+ img = np.array(Image.open(d["file_name"]))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
+ if i + 1 >= num_imgs_to_vis:
+ break
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis.py
new file mode 100644
index 0000000000000000000000000000000000000000..78b396534cc1a119677d2af1015fc78a18b83846
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis.py
@@ -0,0 +1,240 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import os
+from fvcore.common.timer import Timer
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.structures import BoxMode
+from detectron2.utils.file_io import PathManager
+
+from .builtin_meta import _get_coco_instances_meta
+from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES
+from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES
+
+"""
+This file contains functions to parse LVIS-format annotations into dicts in the
+"Detectron2 format".
+"""
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
+
+
+def register_lvis_instances(name, metadata, json_file, image_root):
+ """
+ Register a dataset in LVIS's json annotation format for instance detection and segmentation.
+
+ Args:
+ name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
+ metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
+ json_file (str): path to the json instance annotation file.
+ image_root (str or path-like): directory which contains all the images.
+ """
+ DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
+ )
+
+
+def load_lvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
+ """
+ Load a json file in LVIS's annotation format.
+
+ Args:
+ json_file (str): full path to the LVIS json annotation file.
+ image_root (str): the directory where the images in this json file exists.
+ dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
+ If provided, this function will put "thing_classes" into the metadata
+ associated with this dataset.
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
+ loaded into the dataset dict (besides "bbox", "bbox_mode", "category_id",
+ "segmentation"). The values for these keys will be returned as-is.
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+ `Using Custom Datasets `_ )
+
+ Notes:
+ 1. This function does not read the image files.
+ The results do not have the "image" field.
+ """
+ from lvis import LVIS
+
+ json_file = PathManager.get_local_path(json_file)
+
+ timer = Timer()
+ lvis_api = LVIS(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
+
+ if dataset_name is not None:
+ meta = get_lvis_instances_meta(dataset_name)
+ MetadataCatalog.get(dataset_name).set(**meta)
+
+ # sort indices for reproducible results
+ img_ids = sorted(lvis_api.imgs.keys())
+ # imgs is a list of dicts, each looks something like:
+ # {'license': 4,
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
+ # 'height': 427,
+ # 'width': 640,
+ # 'date_captured': '2013-11-17 05:57:24',
+ # 'id': 1268}
+ imgs = lvis_api.load_imgs(img_ids)
+ # anns is a list[list[dict]], where each dict is an annotation
+ # record for an object. The inner list enumerates the objects in an image
+ # and the outer list enumerates over images. Example of anns[0]:
+ # [{'segmentation': [[192.81,
+ # 247.09,
+ # ...
+ # 219.03,
+ # 249.06]],
+ # 'area': 1035.749,
+ # 'image_id': 1268,
+ # 'bbox': [192.81, 224.8, 74.73, 33.43],
+ # 'category_id': 16,
+ # 'id': 42986},
+ # ...]
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
+
+ # Sanity check that each annotation has a unique id
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format(
+ json_file
+ )
+
+ imgs_anns = list(zip(imgs, anns))
+
+ logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file))
+
+ if extra_annotation_keys:
+ logger.info(
+ "The following extra annotation keys will be loaded: {} ".format(extra_annotation_keys)
+ )
+ else:
+ extra_annotation_keys = []
+
+ def get_file_name(img_root, img_dict):
+ # Determine the path including the split folder ("train2017", "val2017", "test2017") from
+ # the coco_url field. Example:
+ # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
+ split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
+ return os.path.join(img_root + split_folder, file_name)
+
+ dataset_dicts = []
+
+ for (img_dict, anno_dict_list) in imgs_anns:
+ record = {}
+ record["file_name"] = get_file_name(image_root, img_dict)
+ record["height"] = img_dict["height"]
+ record["width"] = img_dict["width"]
+ record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
+ record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
+ image_id = record["image_id"] = img_dict["id"]
+
+ objs = []
+ for anno in anno_dict_list:
+ # Check that the image_id in this annotation is the same as
+ # the image_id we're looking at.
+ # This fails only when the data parsing logic or the annotation file is buggy.
+ assert anno["image_id"] == image_id
+ obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
+ # LVIS data loader can be used to load COCO dataset categories. In this case `meta`
+ # variable will have a field with COCO-specific category mapping.
+ if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta:
+ obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][anno["category_id"]]
+ else:
+ obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed
+ segm = anno["segmentation"] # list[list[float]]
+ # filter out invalid polygons (< 3 points)
+ valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
+ assert len(segm) == len(
+ valid_segm
+ ), "Annotation contains an invalid polygon with < 3 points"
+ assert len(segm) > 0
+ obj["segmentation"] = segm
+ for extra_ann_key in extra_annotation_keys:
+ obj[extra_ann_key] = anno[extra_ann_key]
+ objs.append(obj)
+ record["annotations"] = objs
+ dataset_dicts.append(record)
+
+ return dataset_dicts
+
+
+def get_lvis_instances_meta(dataset_name):
+ """
+ Load LVIS metadata.
+
+ Args:
+ dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").
+
+ Returns:
+ dict: LVIS metadata with keys: thing_classes
+ """
+ if "cocofied" in dataset_name:
+ return _get_coco_instances_meta()
+ if "v0.5" in dataset_name:
+ return _get_lvis_instances_meta_v0_5()
+ elif "v1" in dataset_name:
+ return _get_lvis_instances_meta_v1()
+ raise ValueError("No built-in metadata for dataset {}".format(dataset_name))
+
+
+def _get_lvis_instances_meta_v0_5():
+ assert len(LVIS_V0_5_CATEGORIES) == 1230
+ cat_ids = [k["id"] for k in LVIS_V0_5_CATEGORIES]
+ assert min(cat_ids) == 1 and max(cat_ids) == len(
+ cat_ids
+ ), "Category ids are not in [1, #categories], as expected"
+ # Ensure that the category list is sorted by id
+ lvis_categories = sorted(LVIS_V0_5_CATEGORIES, key=lambda x: x["id"])
+ thing_classes = [k["synonyms"][0] for k in lvis_categories]
+ meta = {"thing_classes": thing_classes}
+ return meta
+
+
+def _get_lvis_instances_meta_v1():
+ assert len(LVIS_V1_CATEGORIES) == 1203
+ cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES]
+ assert min(cat_ids) == 1 and max(cat_ids) == len(
+ cat_ids
+ ), "Category ids are not in [1, #categories], as expected"
+ # Ensure that the category list is sorted by id
+ lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"])
+ thing_classes = [k["synonyms"][0] for k in lvis_categories]
+ meta = {"thing_classes": thing_classes}
+ return meta
+
+
+if __name__ == "__main__":
+ """
+ Test the LVIS json dataset loader.
+
+ Usage:
+ python -m detectron2.data.datasets.lvis \
+ path/to/json path/to/image_root dataset_name vis_limit
+ """
+ import sys
+ import numpy as np
+ from detectron2.utils.logger import setup_logger
+ from PIL import Image
+ import detectron2.data.datasets # noqa # add pre-defined metadata
+ from detectron2.utils.visualizer import Visualizer
+
+ logger = setup_logger(name=__name__)
+ meta = MetadataCatalog.get(sys.argv[3])
+
+ dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ dirname = "lvis-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+ for d in dicts[: int(sys.argv[4])]:
+ img = np.array(Image.open(d["file_name"]))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v0_5_categories.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v0_5_categories.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3dab6198da614937b08682f4c9edf52bdf1d236
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v0_5_categories.py
@@ -0,0 +1,13 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Autogen with
+# with open("lvis_v0.5_val.json", "r") as f:
+# a = json.load(f)
+# c = a["categories"]
+# for x in c:
+# del x["image_count"]
+# del x["instance_count"]
+# LVIS_CATEGORIES = repr(c) + " # noqa"
+
+# fmt: off
+LVIS_CATEGORIES = [{'frequency': 'r', 'id': 1, 'synset': 'acorn.n.01', 'synonyms': ['acorn'], 'def': 'nut from an oak tree', 'name': 'acorn'}, {'frequency': 'c', 'id': 2, 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'id': 3, 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'id': 4, 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'c', 'id': 5, 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'id': 6, 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'r', 'id': 7, 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'id': 8, 'synset': 'almond.n.02', 'synonyms': ['almond'], 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'id': 9, 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'r', 'id': 10, 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'id': 11, 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'id': 12, 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'id': 13, 'synset': 'apple.n.01', 'synonyms': ['apple'], 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'id': 14, 'synset': 'apple_juice.n.01', 'synonyms': ['apple_juice'], 'def': 'the juice of apples', 'name': 'apple_juice'}, {'frequency': 'r', 'id': 15, 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'id': 16, 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'id': 17, 'synset': 'apron.n.01', 'synonyms': ['apron'], 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'id': 18, 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'c', 'id': 19, 'synset': 'armband.n.02', 'synonyms': ['armband'], 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'id': 20, 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'id': 21, 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'id': 22, 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'id': 23, 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'id': 24, 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'id': 25, 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'id': 26, 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'id': 27, 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'c', 'id': 28, 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'id': 29, 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'id': 30, 'synset': 'awning.n.01', 'synonyms': ['awning'], 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'id': 31, 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'f', 'id': 32, 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'id': 33, 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'id': 34, 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'id': 35, 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'id': 36, 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'id': 37, 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'id': 38, 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'id': 39, 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'id': 40, 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'id': 41, 'synset': 'ball.n.06', 'synonyms': ['ball'], 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'id': 42, 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'id': 43, 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'id': 44, 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'id': 45, 'synset': 'banana.n.02', 'synonyms': ['banana'], 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'r', 'id': 46, 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'id': 47, 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'c', 'id': 48, 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'id': 49, 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'id': 50, 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'id': 51, 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'id': 52, 'synset': 'barge.n.01', 'synonyms': ['barge'], 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'id': 53, 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'id': 54, 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'id': 55, 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'id': 56, 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'id': 57, 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'id': 58, 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'id': 59, 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'id': 60, 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'id': 61, 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'id': 62, 'synset': 'basket.n.03', 'synonyms': ['basketball_hoop'], 'def': 'metal hoop supporting a net through which players try to throw the basketball', 'name': 'basketball_hoop'}, {'frequency': 'c', 'id': 63, 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'id': 64, 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'r', 'id': 65, 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'id': 66, 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'id': 67, 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'id': 68, 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'id': 69, 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'id': 70, 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'id': 71, 'synset': 'battery.n.02', 'synonyms': ['battery'], 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'id': 72, 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'id': 73, 'synset': 'bead.n.01', 'synonyms': ['bead'], 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'r', 'id': 74, 'synset': 'beaker.n.01', 'synonyms': ['beaker'], 'def': 'a flatbottomed jar made of glass or plastic; used for chemistry', 'name': 'beaker'}, {'frequency': 'c', 'id': 75, 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'id': 76, 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'id': 77, 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'id': 78, 'synset': 'bear.n.01', 'synonyms': ['bear'], 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'id': 79, 'synset': 'bed.n.01', 'synonyms': ['bed'], 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'c', 'id': 80, 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'id': 81, 'synset': 'beef.n.01', 'synonyms': ['cow'], 'def': 'cattle that are reared for their meat', 'name': 'cow'}, {'frequency': 'c', 'id': 82, 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'id': 83, 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'id': 84, 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'id': 85, 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'id': 86, 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'id': 87, 'synset': 'bell.n.01', 'synonyms': ['bell'], 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'id': 88, 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'id': 89, 'synset': 'belt.n.02', 'synonyms': ['belt'], 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'id': 90, 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'id': 91, 'synset': 'bench.n.01', 'synonyms': ['bench'], 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'id': 92, 'synset': 'beret.n.01', 'synonyms': ['beret'], 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'id': 93, 'synset': 'bib.n.02', 'synonyms': ['bib'], 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'id': 94, 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'id': 95, 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'id': 96, 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'c', 'id': 97, 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'id': 98, 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'id': 99, 'synset': 'bird.n.01', 'synonyms': ['bird'], 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'r', 'id': 100, 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'r', 'id': 101, 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'id': 102, 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'id': 103, 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'id': 104, 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'id': 105, 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'id': 106, 'synset': 'biscuit.n.01', 'synonyms': ['biscuit_(bread)'], 'def': 'small round bread leavened with baking-powder or soda', 'name': 'biscuit_(bread)'}, {'frequency': 'r', 'id': 107, 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'id': 108, 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'id': 109, 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'id': 110, 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'id': 111, 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'id': 112, 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'id': 113, 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'c', 'id': 114, 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'c', 'id': 115, 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'id': 116, 'synset': 'boar.n.02', 'synonyms': ['boar'], 'def': 'an uncastrated male hog', 'name': 'boar'}, {'frequency': 'r', 'id': 117, 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'id': 118, 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'c', 'id': 119, 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'r', 'id': 120, 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'id': 121, 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'id': 122, 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'id': 123, 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'id': 124, 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'id': 125, 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'id': 126, 'synset': 'book.n.01', 'synonyms': ['book'], 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'r', 'id': 127, 'synset': 'book_bag.n.01', 'synonyms': ['book_bag'], 'def': 'a bag in which students carry their books', 'name': 'book_bag'}, {'frequency': 'c', 'id': 128, 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'id': 129, 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'id': 130, 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'id': 131, 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'id': 132, 'synset': 'boot.n.01', 'synonyms': ['boot'], 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'id': 133, 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'id': 134, 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'id': 135, 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'id': 136, 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'id': 137, 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'id': 138, 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'id': 139, 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'id': 140, 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'id': 141, 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'id': 142, 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'r', 'id': 143, 'synset': 'bowling_pin.n.01', 'synonyms': ['bowling_pin'], 'def': 'a club-shaped wooden object used in bowling', 'name': 'bowling_pin'}, {'frequency': 'r', 'id': 144, 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'id': 145, 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'id': 146, 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'id': 147, 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'id': 148, 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'id': 149, 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'r', 'id': 150, 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'c', 'id': 151, 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'id': 152, 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'c', 'id': 153, 'synset': 'bristle_brush.n.01', 'synonyms': ['bristle_brush'], 'def': 'a brush that is made with the short stiff hairs of an animal or plant', 'name': 'bristle_brush'}, {'frequency': 'f', 'id': 154, 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'id': 155, 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'id': 156, 'synset': 'broom.n.01', 'synonyms': ['broom'], 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'id': 157, 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'id': 158, 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'id': 159, 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'id': 160, 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'id': 161, 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'id': 162, 'synset': 'bull.n.11', 'synonyms': ['bull'], 'def': 'mature male cow', 'name': 'bull'}, {'frequency': 'r', 'id': 163, 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'id': 164, 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'id': 165, 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'id': 166, 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'id': 167, 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'id': 168, 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'r', 'id': 169, 'synset': 'bully_beef.n.01', 'synonyms': ['corned_beef', 'corn_beef'], 'def': 'beef cured or pickled in brine', 'name': 'corned_beef'}, {'frequency': 'f', 'id': 170, 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'id': 171, 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'id': 172, 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'id': 173, 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'id': 174, 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'id': 175, 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'c', 'id': 176, 'synset': 'butcher_knife.n.01', 'synonyms': ['butcher_knife'], 'def': 'a large sharp knife for cutting or trimming meat', 'name': 'butcher_knife'}, {'frequency': 'c', 'id': 177, 'synset': 'butter.n.01', 'synonyms': ['butter'], 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'id': 178, 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'id': 179, 'synset': 'button.n.01', 'synonyms': ['button'], 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'id': 180, 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'id': 181, 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'r', 'id': 182, 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'id': 183, 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'id': 184, 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'id': 185, 'synset': 'cake.n.03', 'synonyms': ['cake'], 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'id': 186, 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'id': 187, 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'id': 188, 'synset': 'calf.n.01', 'synonyms': ['calf'], 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'id': 189, 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'id': 190, 'synset': 'camel.n.01', 'synonyms': ['camel'], 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'id': 191, 'synset': 'camera.n.01', 'synonyms': ['camera'], 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'id': 192, 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'id': 193, 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'id': 194, 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'id': 195, 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'r', 'id': 196, 'synset': 'candelabrum.n.01', 'synonyms': ['candelabrum', 'candelabra'], 'def': 'branched candlestick; ornamental; has several lights', 'name': 'candelabrum'}, {'frequency': 'f', 'id': 197, 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'id': 198, 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'id': 199, 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'id': 200, 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'id': 201, 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'id': 202, 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'r', 'id': 203, 'synset': 'cannon.n.02', 'synonyms': ['cannon'], 'def': 'heavy gun fired from a tank', 'name': 'cannon'}, {'frequency': 'c', 'id': 204, 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'r', 'id': 205, 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'id': 206, 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'c', 'id': 207, 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'id': 208, 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'r', 'id': 209, 'synset': 'cape.n.02', 'synonyms': ['cape'], 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'id': 210, 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'id': 211, 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'id': 212, 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'def': 'a wheeled vehicle adapted to the rails of railroad', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'id': 213, 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'id': 214, 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'id': 215, 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'id': 216, 'synset': 'card.n.03', 'synonyms': ['card'], 'def': 'a rectangular piece of paper used to send messages (e.g. greetings or pictures)', 'name': 'card'}, {'frequency': 'r', 'id': 217, 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'id': 218, 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'id': 219, 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'id': 220, 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'id': 221, 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'c', 'id': 222, 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'id': 223, 'synset': 'cart.n.01', 'synonyms': ['cart'], 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'id': 224, 'synset': 'carton.n.02', 'synonyms': ['carton'], 'def': 'a box made of cardboard; opens by flaps on top', 'name': 'carton'}, {'frequency': 'c', 'id': 225, 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'id': 226, 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'id': 227, 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'id': 228, 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'id': 229, 'synset': 'cat.n.01', 'synonyms': ['cat'], 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'c', 'id': 230, 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'r', 'id': 231, 'synset': 'caviar.n.01', 'synonyms': ['caviar', 'caviare'], 'def': "salted roe of sturgeon or other large fish; usually served as an hors d'oeuvre", 'name': 'caviar'}, {'frequency': 'c', 'id': 232, 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'id': 233, 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'c', 'id': 234, 'synset': 'celery.n.01', 'synonyms': ['celery'], 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'id': 235, 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'id': 236, 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 'chain_mail'}, {'frequency': 'f', 'id': 237, 'synset': 'chair.n.01', 'synonyms': ['chair'], 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'id': 238, 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'id': 239, 'synset': 'champagne.n.01', 'synonyms': ['champagne'], 'def': 'a white sparkling wine produced in Champagne or resembling that produced there', 'name': 'champagne'}, {'frequency': 'f', 'id': 240, 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'id': 241, 'synset': 'chap.n.04', 'synonyms': ['chap'], 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'id': 242, 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'id': 243, 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'id': 244, 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'id': 245, 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'r', 'id': 246, 'synset': 'chest_of_drawers.n.01', 'synonyms': ['chest_of_drawers_(furniture)', 'bureau_(furniture)', 'chest_(furniture)'], 'def': 'furniture with drawers for keeping clothes', 'name': 'chest_of_drawers_(furniture)'}, {'frequency': 'c', 'id': 247, 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'id': 248, 'synset': 'chicken_wire.n.01', 'synonyms': ['chicken_wire'], 'def': 'a galvanized wire network with a hexagonal mesh; used to build fences', 'name': 'chicken_wire'}, {'frequency': 'r', 'id': 249, 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'r', 'id': 250, 'synset': 'chihuahua.n.03', 'synonyms': ['Chihuahua'], 'def': 'an old breed of tiny short-haired dog with protruding eyes from Mexico', 'name': 'Chihuahua'}, {'frequency': 'r', 'id': 251, 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'id': 252, 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'id': 253, 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'id': 254, 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'id': 255, 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'id': 256, 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'id': 257, 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'id': 258, 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'id': 259, 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 'id': 260, 'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'def': 'necklace that fits tightly around the neck', 'name': 'choker'}, {'frequency': 'f', 'id': 261, 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'c', 'id': 262, 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'id': 263, 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'id': 264, 'synset': 'chute.n.02', 'synonyms': ['slide'], 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'id': 265, 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'id': 266, 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'c', 'id': 267, 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'id': 268, 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'id': 269, 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'id': 270, 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'r', 'id': 271, 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'id': 272, 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'id': 273, 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'id': 274, 'synset': 'clip.n.03', 'synonyms': ['clip'], 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'id': 275, 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'f', 'id': 276, 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'id': 277, 'synset': 'clock_tower.n.01', 'synonyms': ['clock_tower'], 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'id': 278, 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'id': 279, 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'id': 280, 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'id': 281, 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'id': 282, 'synset': 'coat.n.01', 'synonyms': ['coat'], 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'id': 283, 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'r', 'id': 284, 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'id': 285, 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'c', 'id': 286, 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'r', 'id': 287, 'synset': 'coffee_filter.n.01', 'synonyms': ['coffee_filter'], 'def': 'filter (usually of paper) that passes the coffee and retains the coffee grounds', 'name': 'coffee_filter'}, {'frequency': 'f', 'id': 288, 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'id': 289, 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'id': 290, 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'id': 291, 'synset': 'coil.n.05', 'synonyms': ['coil'], 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'id': 292, 'synset': 'coin.n.01', 'synonyms': ['coin'], 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'r', 'id': 293, 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'id': 294, 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'id': 295, 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'id': 296, 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'id': 297, 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'id': 298, 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'f', 'id': 299, 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'r', 'id': 300, 'synset': 'concrete_mixer.n.01', 'synonyms': ['concrete_mixer', 'cement_mixer'], 'def': 'a machine with a large revolving drum in which cement/concrete is mixed', 'name': 'concrete_mixer'}, {'frequency': 'f', 'id': 301, 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'id': 302, 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'id': 303, 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'id': 304, 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'c', 'id': 305, 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'id': 306, 'synset': 'cookie_jar.n.01', 'synonyms': ['cookie_jar', 'cooky_jar'], 'def': 'a jar in which cookies are kept (and sometimes money is hidden)', 'name': 'cookie_jar'}, {'frequency': 'r', 'id': 307, 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'id': 308, 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'c', 'id': 309, 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'id': 310, 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'r', 'id': 311, 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'c', 'id': 312, 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'def': 'ears of corn that can be prepared and served for human food', 'name': 'edible_corn'}, {'frequency': 'r', 'id': 313, 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 'id': 314, 'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'id': 315, 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'id': 316, 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'r', 'id': 317, 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'r', 'id': 318, 'synset': 'cos.n.02', 'synonyms': ['romaine_lettuce'], 'def': 'lettuce with long dark-green leaves in a loosely packed elongated head', 'name': 'romaine_lettuce'}, {'frequency': 'c', 'id': 319, 'synset': 'costume.n.04', 'synonyms': ['costume'], 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'id': 320, 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'id': 321, 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'r', 'id': 322, 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'id': 323, 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'r', 'id': 324, 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'c', 'id': 325, 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'id': 326, 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'id': 327, 'synset': 'crate.n.01', 'synonyms': ['crate'], 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'r', 'id': 328, 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'id': 329, 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'r', 'id': 330, 'synset': 'credit_card.n.01', 'synonyms': ['credit_card', 'charge_card', 'debit_card'], 'def': 'a card, usually plastic, used to pay for goods and services', 'name': 'credit_card'}, {'frequency': 'c', 'id': 331, 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'id': 332, 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'id': 333, 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'def': 'an earthen jar (made of baked clay)', 'name': 'crock_pot'}, {'frequency': 'f', 'id': 334, 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'id': 335, 'synset': 'crouton.n.01', 'synonyms': ['crouton'], 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'r', 'id': 336, 'synset': 'crow.n.01', 'synonyms': ['crow'], 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'c', 'id': 337, 'synset': 'crown.n.04', 'synonyms': ['crown'], 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'id': 338, 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'id': 339, 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'id': 340, 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'c', 'id': 341, 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'r', 'id': 342, 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'id': 343, 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'r', 'id': 344, 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'id': 345, 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'id': 346, 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'id': 347, 'synset': 'cup.n.01', 'synonyms': ['cup'], 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'id': 348, 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'def': 'a metal vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'c', 'id': 349, 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'id': 350, 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'id': 351, 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'id': 352, 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, {'frequency': 'f', 'id': 353, 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'id': 354, 'synset': 'custard.n.01', 'synonyms': ['custard'], 'def': 'sweetened mixture of milk and eggs baked or boiled or frozen', 'name': 'custard'}, {'frequency': 'c', 'id': 355, 'synset': 'cutter.n.06', 'synonyms': ['cutting_tool'], 'def': 'a cutting implement; a tool for cutting', 'name': 'cutting_tool'}, {'frequency': 'r', 'id': 356, 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'id': 357, 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'id': 358, 'synset': 'dachshund.n.01', 'synonyms': ['dachshund', 'dachsie', 'badger_dog'], 'def': 'small long-bodied short-legged breed of dog having a short sleek coat and long drooping ears', 'name': 'dachshund'}, {'frequency': 'r', 'id': 359, 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'id': 360, 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'id': 361, 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'id': 362, 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'id': 363, 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'id': 364, 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'id': 365, 'synset': 'desk.n.01', 'synonyms': ['desk'], 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'id': 366, 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'id': 367, 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'id': 368, 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'def': 'a daily written record of (usually personal) experiences and observations', 'name': 'diary'}, {'frequency': 'r', 'id': 369, 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'id': 370, 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'id': 371, 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'id': 372, 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'c', 'id': 373, 'synset': 'dish.n.01', 'synonyms': ['dish'], 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'id': 374, 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, {'frequency': 'c', 'id': 375, 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'def': 'a cloth for washing dishes', 'name': 'dishrag'}, {'frequency': 'c', 'id': 376, 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'id': 377, 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'id': 378, 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid'], 'def': 'a low-sudsing detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'r', 'id': 379, 'synset': 'diskette.n.01', 'synonyms': ['diskette', 'floppy', 'floppy_disk'], 'def': 'a small plastic magnetic disk enclosed in a stiff envelope used to store data', 'name': 'diskette'}, {'frequency': 'c', 'id': 380, 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'c', 'id': 381, 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'id': 382, 'synset': 'dog.n.01', 'synonyms': ['dog'], 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'id': 383, 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'c', 'id': 384, 'synset': 'doll.n.01', 'synonyms': ['doll'], 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'id': 385, 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'id': 386, 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'id': 387, 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'r', 'id': 388, 'synset': 'domino.n.03', 'synonyms': ['eye_mask'], 'def': 'a mask covering the upper part of the face but with holes for the eyes', 'name': 'eye_mask'}, {'frequency': 'r', 'id': 389, 'synset': 'doorbell.n.01', 'synonyms': ['doorbell', 'buzzer'], 'def': 'a button at an outer door that gives a ringing or buzzing signal when pushed', 'name': 'doorbell'}, {'frequency': 'f', 'id': 390, 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'id': 391, 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'id': 392, 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'id': 393, 'synset': 'dove.n.01', 'synonyms': ['dove'], 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'id': 394, 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'id': 395, 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'def': 'a boxlike container in a piece of furniture; made so as to slide in and out', 'name': 'drawer'}, {'frequency': 'c', 'id': 396, 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'id': 397, 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'id': 398, 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'c', 'id': 399, 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'c', 'id': 400, 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'id': 401, 'synset': 'drill.n.01', 'synonyms': ['drill'], 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'id': 402, 'synset': 'drinking_fountain.n.01', 'synonyms': ['drinking_fountain'], 'def': 'a public fountain to provide a jet of drinking water', 'name': 'drinking_fountain'}, {'frequency': 'r', 'id': 403, 'synset': 'drone.n.04', 'synonyms': ['drone'], 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'id': 404, 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'id': 405, 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'id': 406, 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'id': 407, 'synset': 'duck.n.01', 'synonyms': ['duck'], 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'r', 'id': 408, 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'id': 409, 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'id': 410, 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'def': 'a large cylindrical bag of heavy cloth', 'name': 'duffel_bag'}, {'frequency': 'r', 'id': 411, 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'id': 412, 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'id': 413, 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'r', 'id': 414, 'synset': 'dutch_oven.n.02', 'synonyms': ['Dutch_oven'], 'def': 'iron or earthenware cooking pot; used for stews', 'name': 'Dutch_oven'}, {'frequency': 'c', 'id': 415, 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'id': 416, 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'id': 417, 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'def': 'a soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'id': 418, 'synset': 'earring.n.01', 'synonyms': ['earring'], 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'id': 419, 'synset': 'easel.n.01', 'synonyms': ['easel'], 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'id': 420, 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'id': 421, 'synset': 'eel.n.01', 'synonyms': ['eel'], 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'id': 422, 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'id': 423, 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'id': 424, 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'id': 425, 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'id': 426, 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'id': 427, 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'id': 428, 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'id': 429, 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'r', 'id': 430, 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'id': 431, 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'id': 432, 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'id': 433, 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'id': 434, 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'id': 435, 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'id': 436, 'synset': 'fan.n.01', 'synonyms': ['fan'], 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'id': 437, 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'id': 438, 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'id': 439, 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'id': 440, 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'r', 'id': 441, 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'id': 442, 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'id': 443, 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'id': 444, 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'id': 445, 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'id': 446, 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'id': 447, 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'c', 'id': 448, 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'c', 'id': 449, 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'id': 450, 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'id': 451, 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'id': 452, 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'c', 'id': 453, 'synset': 'fish.n.01', 'synonyms': ['fish'], 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'r', 'id': 454, 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'id': 455, 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'def': 'a transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'r', 'id': 456, 'synset': 'fishing_boat.n.01', 'synonyms': ['fishing_boat', 'fishing_vessel'], 'def': 'a vessel for fishing', 'name': 'fishing_boat'}, {'frequency': 'c', 'id': 457, 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'id': 458, 'synset': 'flag.n.01', 'synonyms': ['flag'], 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'id': 459, 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'id': 460, 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'id': 461, 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'r', 'id': 462, 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'id': 463, 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'id': 464, 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'id': 465, 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'id': 466, 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'id': 467, 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'id': 468, 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'r', 'id': 469, 'synset': 'foal.n.01', 'synonyms': ['foal'], 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'id': 470, 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'id': 471, 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'id': 472, 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'id': 473, 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'id': 474, 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'id': 475, 'synset': 'fork.n.01', 'synonyms': ['fork'], 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'r', 'id': 476, 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'r', 'id': 477, 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'r', 'id': 478, 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'id': 479, 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'def': 'anything that freshens', 'name': 'freshener'}, {'frequency': 'f', 'id': 480, 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'id': 481, 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'def': 'a tailless stout-bodied amphibians with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'id': 482, 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'r', 'id': 483, 'synset': 'fruit_salad.n.01', 'synonyms': ['fruit_salad'], 'def': 'salad composed of fruits', 'name': 'fruit_salad'}, {'frequency': 'c', 'id': 484, 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'id': 485, 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'id': 486, 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'c', 'id': 487, 'synset': 'futon.n.01', 'synonyms': ['futon'], 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'id': 488, 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'id': 489, 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'id': 490, 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'id': 491, 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'id': 492, 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'id': 493, 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'id': 494, 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'id': 495, 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'r', 'id': 496, 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'def': 'small swift graceful antelope of Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'id': 497, 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'id': 498, 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'c', 'id': 499, 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'id': 500, 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'id': 501, 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'id': 502, 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'id': 503, 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'id': 504, 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'id': 505, 'synset': 'globe.n.03', 'synonyms': ['globe'], 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'id': 506, 'synset': 'glove.n.02', 'synonyms': ['glove'], 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'id': 507, 'synset': 'goat.n.01', 'synonyms': ['goat'], 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'id': 508, 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'id': 509, 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'r', 'id': 510, 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'id': 511, 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'id': 512, 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'id': 513, 'synset': 'goose.n.01', 'synonyms': ['goose'], 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'id': 514, 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'id': 515, 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'r', 'id': 516, 'synset': 'gown.n.04', 'synonyms': ['surgical_gown', 'scrubs_(surgical_clothing)'], 'def': 'protective garment worn by surgeons during operations', 'name': 'surgical_gown'}, {'frequency': 'f', 'id': 517, 'synset': 'grape.n.01', 'synonyms': ['grape'], 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'r', 'id': 518, 'synset': 'grasshopper.n.01', 'synonyms': ['grasshopper'], 'def': 'plant-eating insect with hind legs adapted for leaping', 'name': 'grasshopper'}, {'frequency': 'c', 'id': 519, 'synset': 'grater.n.01', 'synonyms': ['grater'], 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'id': 520, 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'id': 521, 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'c', 'id': 522, 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'c', 'id': 523, 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'id': 524, 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'r', 'id': 525, 'synset': 'grillroom.n.01', 'synonyms': ['grillroom', 'grill_(restaurant)'], 'def': 'a restaurant where food is cooked on a grill', 'name': 'grillroom'}, {'frequency': 'r', 'id': 526, 'synset': 'grinder.n.04', 'synonyms': ['grinder_(tool)'], 'def': 'a machine tool that polishes metal', 'name': 'grinder_(tool)'}, {'frequency': 'r', 'id': 527, 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'id': 528, 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'id': 529, 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'r', 'id': 530, 'synset': 'guacamole.n.01', 'synonyms': ['guacamole'], 'def': 'a dip made of mashed avocado mixed with chopped onions and other seasonings', 'name': 'guacamole'}, {'frequency': 'f', 'id': 531, 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'id': 532, 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'id': 533, 'synset': 'gun.n.01', 'synonyms': ['gun'], 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'r', 'id': 534, 'synset': 'hair_spray.n.01', 'synonyms': ['hair_spray'], 'def': 'substance sprayed on the hair to hold it in place', 'name': 'hair_spray'}, {'frequency': 'c', 'id': 535, 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'id': 536, 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'id': 537, 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'f', 'id': 538, 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 'id': 539, 'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'id': 540, 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'r', 'id': 541, 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'id': 542, 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'r', 'id': 543, 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'c', 'id': 544, 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'id': 545, 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'id': 546, 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'id': 547, 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'id': 548, 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'id': 549, 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'id': 550, 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'id': 551, 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'id': 552, 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'id': 553, 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'id': 554, 'synset': 'hat.n.01', 'synonyms': ['hat'], 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'id': 555, 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'r', 'id': 556, 'synset': 'hatch.n.03', 'synonyms': ['hatch'], 'def': 'a movable barrier covering a hatchway', 'name': 'hatch'}, {'frequency': 'c', 'id': 557, 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'def': 'a garment that covers the head and face', 'name': 'veil'}, {'frequency': 'f', 'id': 558, 'synset': 'headband.n.01', 'synonyms': ['headband'], 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'id': 559, 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'def': 'a vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'id': 560, 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'id': 561, 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'id': 562, 'synset': 'headset.n.01', 'synonyms': ['headset'], 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'id': 563, 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'r', 'id': 564, 'synset': 'hearing_aid.n.02', 'synonyms': ['hearing_aid'], 'def': 'an acoustic device used to direct sound to the ear of a hearing-impaired person', 'name': 'hearing_aid'}, {'frequency': 'c', 'id': 565, 'synset': 'heart.n.02', 'synonyms': ['heart'], 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'id': 566, 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'id': 567, 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'id': 568, 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'id': 569, 'synset': 'heron.n.02', 'synonyms': ['heron'], 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'id': 570, 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'id': 571, 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'id': 572, 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'id': 573, 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'id': 574, 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'id': 575, 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'id': 576, 'synset': 'honey.n.01', 'synonyms': ['honey'], 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'id': 577, 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'id': 578, 'synset': 'hook.n.05', 'synonyms': ['hook'], 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'f', 'id': 579, 'synset': 'horse.n.01', 'synonyms': ['horse'], 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'id': 580, 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'id': 581, 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'id': 582, 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'id': 583, 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'id': 584, 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'id': 585, 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'r', 'id': 586, 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'id': 587, 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'c', 'id': 588, 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'id': 589, 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'id': 590, 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'id': 591, 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'id': 592, 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'id': 593, 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'r', 'id': 594, 'synset': 'ice_tea.n.01', 'synonyms': ['ice_tea', 'iced_tea'], 'def': 'strong tea served over ice', 'name': 'ice_tea'}, {'frequency': 'c', 'id': 595, 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'id': 596, 'synset': 'incense.n.01', 'synonyms': ['incense'], 'def': 'a substance that produces a fragrant odor when burned', 'name': 'incense'}, {'frequency': 'r', 'id': 597, 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'c', 'id': 598, 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'id': 599, 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 'smoothing_iron_(for_clothing)'], 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'r', 'id': 600, 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'id': 601, 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'r', 'id': 602, 'synset': 'jam.n.01', 'synonyms': ['jam'], 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'id': 603, 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'id': 604, 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'id': 605, 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'id': 606, 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'id': 607, 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'c', 'id': 608, 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'id': 609, 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'r', 'id': 610, 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'id': 611, 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'id': 612, 'synset': 'keg.n.02', 'synonyms': ['keg'], 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'id': 613, 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'id': 614, 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'id': 615, 'synset': 'key.n.01', 'synonyms': ['key'], 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'id': 616, 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'r', 'id': 617, 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'def': 'a knee-length pleated tartan skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'id': 618, 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'id': 619, 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'c', 'id': 620, 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'id': 621, 'synset': 'kite.n.03', 'synonyms': ['kite'], 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'id': 622, 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'id': 623, 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'id': 624, 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'id': 625, 'synset': 'knife.n.01', 'synonyms': ['knife'], 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'id': 626, 'synset': 'knight.n.02', 'synonyms': ['knight_(chess_piece)', 'horse_(chess_piece)'], 'def': 'a chess game piece shaped to resemble the head of a horse', 'name': 'knight_(chess_piece)'}, {'frequency': 'r', 'id': 627, 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'id': 628, 'synset': 'knob.n.02', 'synonyms': ['knob'], 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'id': 629, 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'id': 630, 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'id': 631, 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'id': 632, 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'id': 633, 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'r', 'id': 634, 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'c', 'id': 635, 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'id': 636, 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'id': 637, 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'id': 638, 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'def': 'a metal post supporting an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'id': 639, 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'id': 640, 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'id': 641, 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'id': 642, 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'id': 643, 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'c', 'id': 644, 'synset': 'latch.n.02', 'synonyms': ['latch'], 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'id': 645, 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'id': 646, 'synset': 'leather.n.01', 'synonyms': ['leather'], 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'id': 647, 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'id': 648, 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'f', 'id': 649, 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'id': 650, 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'id': 651, 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'id': 652, 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'id': 653, 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'id': 654, 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'id': 655, 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'def': 'glass bulb or tube shaped electric device that emits light (DO NOT MARK LAMPS AS A WHOLE)', 'name': 'lightbulb'}, {'frequency': 'r', 'id': 656, 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'def': 'a metallic conductor that is attached to a high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'c', 'id': 657, 'synset': 'lime.n.06', 'synonyms': ['lime'], 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'id': 658, 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'r', 'id': 659, 'synset': 'linen.n.02', 'synonyms': ['linen_paper'], 'def': 'a high-quality paper made of linen fibers or with a linen finish', 'name': 'linen_paper'}, {'frequency': 'c', 'id': 660, 'synset': 'lion.n.01', 'synonyms': ['lion'], 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'id': 661, 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'c', 'id': 662, 'synset': 'lipstick.n.01', 'synonyms': ['lipstick', 'lip_rouge'], 'def': 'makeup that is used to color the lips', 'name': 'lipstick'}, {'frequency': 'r', 'id': 663, 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'def': 'an alcoholic beverage that is distilled rather than fermented', 'name': 'liquor'}, {'frequency': 'r', 'id': 664, 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'r', 'id': 665, 'synset': 'loafer.n.02', 'synonyms': ['Loafer_(type_of_shoe)'], 'def': 'a low leather step-in shoe', 'name': 'Loafer_(type_of_shoe)'}, {'frequency': 'f', 'id': 666, 'synset': 'log.n.01', 'synonyms': ['log'], 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'id': 667, 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'c', 'id': 668, 'synset': 'lotion.n.01', 'synonyms': ['lotion'], 'def': 'any of various cosmetic preparations that are applied to the skin', 'name': 'lotion'}, {'frequency': 'f', 'id': 669, 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'id': 670, 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'id': 671, 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'id': 672, 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'id': 673, 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'r', 'id': 674, 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'c', 'id': 675, 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'id': 676, 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'id': 677, 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'c', 'id': 678, 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'id': 679, 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'def': 'a container (usually in a barn or stable) from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'id': 680, 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'c', 'id': 681, 'synset': 'map.n.01', 'synonyms': ['map'], 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'c', 'id': 682, 'synset': 'marker.n.03', 'synonyms': ['marker'], 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'id': 683, 'synset': 'martini.n.01', 'synonyms': ['martini'], 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'id': 684, 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'id': 685, 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'id': 686, 'synset': 'masher.n.02', 'synonyms': ['masher'], 'def': 'a kitchen utensil used for mashing (e.g. potatoes)', 'name': 'masher'}, {'frequency': 'f', 'id': 687, 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'id': 688, 'synset': 'mast.n.01', 'synonyms': ['mast'], 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'id': 689, 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'id': 690, 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'id': 691, 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'id': 692, 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'id': 693, 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'id': 694, 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'id': 695, 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'r', 'id': 696, 'synset': 'melon.n.01', 'synonyms': ['melon'], 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'id': 697, 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'id': 698, 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'id': 699, 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'def': 'kitchen appliance that cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'id': 700, 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'c', 'id': 701, 'synset': 'milk.n.01', 'synonyms': ['milk'], 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'f', 'id': 702, 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'id': 703, 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'id': 704, 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'id': 705, 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'id': 706, 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'def': 'a kitchen utensil that is used for mixing foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'id': 707, 'synset': 'money.n.03', 'synonyms': ['money'], 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'id': 708, 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'id': 709, 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'id': 710, 'synset': 'motor.n.01', 'synonyms': ['motor'], 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'id': 711, 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'id': 712, 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'r', 'id': 713, 'synset': 'motorboat.n.01', 'synonyms': ['motorboat', 'powerboat'], 'def': 'a boat propelled by an internal-combustion engine', 'name': 'motorboat'}, {'frequency': 'f', 'id': 714, 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'id': 715, 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'r', 'id': 716, 'synset': 'mouse.n.01', 'synonyms': ['mouse_(animal_rodent)'], 'def': 'a small rodent with pointed snouts and small ears on elongated bodies with slender usually hairless tails', 'name': 'mouse_(animal_rodent)'}, {'frequency': 'f', 'id': 717, 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'def': 'a computer input device that controls an on-screen pointer', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'id': 718, 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 'mousepad'}, {'frequency': 'c', 'id': 719, 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'id': 720, 'synset': 'mug.n.04', 'synonyms': ['mug'], 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'id': 721, 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'id': 722, 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'r', 'id': 723, 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'id': 724, 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'r', 'id': 725, 'synset': 'nameplate.n.01', 'synonyms': ['nameplate'], 'def': 'a plate bearing a name', 'name': 'nameplate'}, {'frequency': 'f', 'id': 726, 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'id': 727, 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'id': 728, 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'id': 729, 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'r', 'id': 730, 'synset': 'needle.n.03', 'synonyms': ['needle'], 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'id': 731, 'synset': 'nest.n.01', 'synonyms': ['nest'], 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'r', 'id': 732, 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'id': 733, 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'id': 734, 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'r', 'id': 735, 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'id': 736, 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'id': 737, 'synset': 'notepad.n.01', 'synonyms': ['notepad'], 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'c', 'id': 738, 'synset': 'nut.n.03', 'synonyms': ['nut'], 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'id': 739, 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'c', 'id': 740, 'synset': 'oar.n.01', 'synonyms': ['oar'], 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'id': 741, 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'id': 742, 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'id': 743, 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'id': 744, 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'def': 'oil from olives', 'name': 'olive_oil'}, {'frequency': 'r', 'id': 745, 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'id': 746, 'synset': 'onion.n.01', 'synonyms': ['onion'], 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'id': 747, 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'id': 748, 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'r', 'id': 749, 'synset': 'oregano.n.01', 'synonyms': ['oregano', 'marjoram'], 'def': 'aromatic Eurasian perennial herb used in cooking and baking', 'name': 'oregano'}, {'frequency': 'c', 'id': 750, 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'c', 'id': 751, 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'def': 'thick cushion used as a seat', 'name': 'ottoman'}, {'frequency': 'c', 'id': 752, 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'id': 753, 'synset': 'owl.n.01', 'synonyms': ['owl'], 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'id': 754, 'synset': 'packet.n.03', 'synonyms': ['packet'], 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'id': 755, 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'id': 756, 'synset': 'pad.n.04', 'synonyms': ['pad'], 'def': 'a flat mass of soft material used for protection, stuffing, or comfort', 'name': 'pad'}, {'frequency': 'c', 'id': 757, 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'def': 'a short light oar used without an oarlock to propel a canoe or small boat', 'name': 'paddle'}, {'frequency': 'c', 'id': 758, 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'r', 'id': 759, 'synset': 'paintbox.n.01', 'synonyms': ['paintbox'], 'def': "a box containing a collection of cubes or tubes of artists' paint", 'name': 'paintbox'}, {'frequency': 'c', 'id': 760, 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'id': 761, 'synset': 'painting.n.01', 'synonyms': ['painting'], 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'c', 'id': 762, 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'id': 763, 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'id': 764, 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'def': 'cooking utensil consisting of a wide metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'id': 765, 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'id': 766, 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'id': 767, 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'id': 768, 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'r', 'id': 769, 'synset': 'paper_clip.n.01', 'synonyms': ['paperclip'], 'def': 'a wire or plastic clip for holding sheets of paper together', 'name': 'paperclip'}, {'frequency': 'f', 'id': 770, 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'id': 771, 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'id': 772, 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'id': 773, 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'id': 774, 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'r', 'id': 775, 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'id': 776, 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'r', 'id': 777, 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'r', 'id': 778, 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'id': 779, 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'id': 780, 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'id': 781, 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'id': 782, 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'r', 'id': 783, 'synset': 'passport.n.02', 'synonyms': ['passport'], 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country', 'name': 'passport'}, {'frequency': 'f', 'id': 784, 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'id': 785, 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'id': 786, 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'id': 787, 'synset': 'peach.n.03', 'synonyms': ['peach'], 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'id': 788, 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'c', 'id': 789, 'synset': 'pear.n.01', 'synonyms': ['pear'], 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'r', 'id': 790, 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'id': 791, 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'id': 792, 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'id': 793, 'synset': 'pen.n.01', 'synonyms': ['pen'], 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'c', 'id': 794, 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'id': 795, 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'id': 796, 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'id': 797, 'synset': 'pendulum.n.01', 'synonyms': ['pendulum'], 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'id': 798, 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'id': 799, 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'id': 800, 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'c', 'id': 801, 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'id': 802, 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'id': 803, 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'def': 'a toiletry that emits and diffuses a fragrant odor', 'name': 'perfume'}, {'frequency': 'r', 'id': 804, 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'id': 805, 'synset': 'person.n.01', 'synonyms': ['baby', 'child', 'boy', 'girl', 'man', 'woman', 'person', 'human'], 'def': 'a human being', 'name': 'baby'}, {'frequency': 'r', 'id': 806, 'synset': 'pet.n.01', 'synonyms': ['pet'], 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'r', 'id': 807, 'synset': 'petfood.n.01', 'synonyms': ['petfood', 'pet-food'], 'def': 'food prepared for animal pets', 'name': 'petfood'}, {'frequency': 'r', 'id': 808, 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'id': 809, 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'id': 810, 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'c', 'id': 811, 'synset': 'piano.n.01', 'synonyms': ['piano'], 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'id': 812, 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'id': 813, 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'id': 814, 'synset': 'pie.n.01', 'synonyms': ['pie'], 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'id': 815, 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'id': 816, 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'def': "a child's coin bank (often shaped like a pig)", 'name': 'piggy_bank'}, {'frequency': 'f', 'id': 817, 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'id': 818, 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'id': 819, 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'id': 820, 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'id': 821, 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'id': 822, 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'id': 823, 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'id': 824, 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'id': 825, 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'r', 'id': 826, 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'id': 827, 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'id': 828, 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'id': 829, 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'id': 830, 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'id': 831, 'synset': 'plate.n.04', 'synonyms': ['plate'], 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'id': 832, 'synset': 'platter.n.01', 'synonyms': ['platter'], 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'id': 833, 'synset': 'playing_card.n.01', 'synonyms': ['playing_card'], 'def': 'one of a pack of cards that are used to play card games', 'name': 'playing_card'}, {'frequency': 'r', 'id': 834, 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'id': 835, 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'id': 836, 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'id': 837, 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'id': 838, 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'id': 839, 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'id': 840, 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'r', 'id': 841, 'synset': 'police_van.n.01', 'synonyms': ['police_van', 'police_wagon', 'paddy_wagon', 'patrol_wagon'], 'def': 'van used by police to transport prisoners', 'name': 'police_van'}, {'frequency': 'f', 'id': 842, 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'id': 843, 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'id': 844, 'synset': 'pony.n.05', 'synonyms': ['pony'], 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'id': 845, 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'id': 846, 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'r', 'id': 847, 'synset': 'portrait.n.02', 'synonyms': ['portrait', 'portrayal'], 'def': 'any likeness of a person, in any medium', 'name': 'portrait'}, {'frequency': 'c', 'id': 848, 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'id': 849, 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'id': 850, 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'id': 851, 'synset': 'pot.n.01', 'synonyms': ['pot'], 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'id': 852, 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'id': 853, 'synset': 'potato.n.01', 'synonyms': ['potato'], 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'id': 854, 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'id': 855, 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'id': 856, 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'r', 'id': 857, 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'id': 858, 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'f', 'id': 859, 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'id': 860, 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'def': 'a weapon that is forcibly thrown or projected at a targets', 'name': 'projectile_(weapon)'}, {'frequency': 'c', 'id': 861, 'synset': 'projector.n.02', 'synonyms': ['projector'], 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'id': 862, 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'id': 863, 'synset': 'prune.n.01', 'synonyms': ['prune'], 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'id': 864, 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'id': 865, 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'id': 866, 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'id': 867, 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'id': 868, 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'id': 869, 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'def': 'a tool for making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'id': 870, 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'r', 'id': 871, 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'id': 872, 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'id': 873, 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'id': 874, 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'id': 875, 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'id': 876, 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'id': 877, 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'id': 878, 'synset': 'radar.n.01', 'synonyms': ['radar'], 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'c', 'id': 879, 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, {'frequency': 'c', 'id': 880, 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'id': 881, 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'id': 882, 'synset': 'raft.n.01', 'synonyms': ['raft'], 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'id': 883, 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'id': 884, 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'id': 885, 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'id': 886, 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'id': 887, 'synset': 'rat.n.01', 'synonyms': ['rat'], 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'id': 888, 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'def': 'a blade that has very sharp edge', 'name': 'razorblade'}, {'frequency': 'c', 'id': 889, 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'id': 890, 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'def': 'car mirror that reflects the view out of the rear window', 'name': 'rearview_mirror'}, {'frequency': 'c', 'id': 891, 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'id': 892, 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'r', 'id': 893, 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'r', 'id': 894, 'synset': 'red_cabbage.n.02', 'synonyms': ['red_cabbage'], 'def': 'compact head of purplish-red leaves', 'name': 'red_cabbage'}, {'frequency': 'f', 'id': 895, 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'id': 896, 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'id': 897, 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'id': 898, 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'r', 'id': 899, 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'id': 900, 'synset': 'ring.n.08', 'synonyms': ['ring'], 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'id': 901, 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'id': 902, 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'id': 903, 'synset': 'robe.n.01', 'synonyms': ['robe'], 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'id': 904, 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'id': 905, 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'id': 906, 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'id': 907, 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'def': 'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'id': 908, 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'id': 909, 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'id': 910, 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'id': 911, 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'id': 912, 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'id': 913, 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'id': 914, 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'id': 915, 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'id': 916, 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'c', 'id': 917, 'synset': 'sail.n.01', 'synonyms': ['sail'], 'def': 'a large piece of fabric by means of which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'c', 'id': 918, 'synset': 'salad.n.01', 'synonyms': ['salad'], 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'id': 919, 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'r', 'id': 920, 'synset': 'salami.n.01', 'synonyms': ['salami'], 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'r', 'id': 921, 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'id': 922, 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'r', 'id': 923, 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'id': 924, 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'id': 925, 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, {'frequency': 'f', 'id': 926, 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'id': 927, 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'id': 928, 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'id': 929, 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'id': 930, 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'id': 931, 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'id': 932, 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'id': 933, 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'id': 934, 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'id': 935, 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'id': 936, 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, {'frequency': 'f', 'id': 937, 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'c', 'id': 938, 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'c', 'id': 939, 'synset': 'scrambled_eggs.n.01', 'synonyms': ['scrambled_eggs'], 'def': 'eggs beaten and cooked to a soft firm consistency while stirring', 'name': 'scrambled_eggs'}, {'frequency': 'r', 'id': 940, 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'r', 'id': 941, 'synset': 'scratcher.n.03', 'synonyms': ['scratcher'], 'def': 'a device used for scratching', 'name': 'scratcher'}, {'frequency': 'c', 'id': 942, 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'c', 'id': 943, 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'id': 944, 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'r', 'id': 945, 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'r', 'id': 946, 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'id': 947, 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'def': 'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'id': 948, 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'r', 'id': 949, 'synset': 'seedling.n.01', 'synonyms': ['seedling'], 'def': 'young plant or tree grown from a seed', 'name': 'seedling'}, {'frequency': 'c', 'id': 950, 'synset': 'serving_dish.n.01', 'synonyms': ['serving_dish'], 'def': 'a dish used for serving food', 'name': 'serving_dish'}, {'frequency': 'r', 'id': 951, 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'r', 'id': 952, 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'id': 953, 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'r', 'id': 954, 'synset': 'shark.n.01', 'synonyms': ['shark'], 'def': 'typically large carnivorous fishes with sharpe teeth', 'name': 'shark'}, {'frequency': 'r', 'id': 955, 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'id': 956, 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'id': 957, 'synset': 'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'id': 958, 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'def': 'toiletry consisting that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'id': 959, 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'id': 960, 'synset': 'shears.n.01', 'synonyms': ['shears'], 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'id': 961, 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'id': 962, 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'id': 963, 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'r', 'id': 964, 'synset': 'shield.n.02', 'synonyms': ['shield'], 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'id': 965, 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'id': 966, 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'c', 'id': 967, 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'id': 968, 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'id': 969, 'synset': 'short_pants.n.01', 'synonyms': ['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'id': 970, 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'c', 'id': 971, 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'id': 972, 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'id': 973, 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'f', 'id': 974, 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'id': 975, 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'r', 'id': 976, 'synset': 'sieve.n.01', 'synonyms': ['sieve', 'screen_(sieve)'], 'def': 'a strainer for separating lumps from powdered material or grading particles', 'name': 'sieve'}, {'frequency': 'f', 'id': 977, 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'id': 978, 'synset': 'silo.n.01', 'synonyms': ['silo'], 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'id': 979, 'synset': 'sink.n.01', 'synonyms': ['sink'], 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'id': 980, 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'id': 981, 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'id': 982, 'synset': 'ski.n.01', 'synonyms': ['ski'], 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'id': 983, 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'id': 984, 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'id': 985, 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'id': 986, 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'c', 'id': 987, 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'id': 988, 'synset': 'sleeping_bag.n.01', 'synonyms': ['sleeping_bag'], 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'id': 989, 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'id': 990, 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'id': 991, 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'id': 992, 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'id': 993, 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'id': 994, 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'id': 995, 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'def': 'tracked vehicle for travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'id': 996, 'synset': 'soap.n.01', 'synonyms': ['soap'], 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'id': 997, 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'id': 998, 'synset': 'sock.n.01', 'synonyms': ['sock'], 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'r', 'id': 999, 'synset': 'soda_fountain.n.02', 'synonyms': ['soda_fountain'], 'def': 'an apparatus for dispensing soda water', 'name': 'soda_fountain'}, {'frequency': 'r', 'id': 1000, 'synset': 'soda_water.n.01', 'synonyms': ['carbonated_water', 'club_soda', 'seltzer', 'sparkling_water'], 'def': 'effervescent beverage artificially charged with carbon dioxide', 'name': 'carbonated_water'}, {'frequency': 'f', 'id': 1001, 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'id': 1002, 'synset': 'softball.n.01', 'synonyms': ['softball'], 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'id': 1003, 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'id': 1004, 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'c', 'id': 1005, 'synset': 'soup.n.01', 'synonyms': ['soup'], 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'id': 1006, 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'id': 1007, 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'id': 1008, 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'id': 1009, 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 'soybean_milk', 'soymilk'], 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'id': 1010, 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'id': 1011, 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'id': 1012, 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'id': 1013, 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'id': 1014, 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 'eyeglasses', 'glasses'], 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'id': 1015, 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'r', 'id': 1016, 'synset': 'spider.n.01', 'synonyms': ['spider'], 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'c', 'id': 1017, 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'id': 1018, 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'id': 1019, 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'id': 1020, 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'id': 1021, 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'c', 'id': 1022, 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'r', 'id': 1023, 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'def': 'echinoderms characterized by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'id': 1024, 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'id': 1025, 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'id': 1026, 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'r', 'id': 1027, 'synset': 'steamer.n.02', 'synonyms': ['steamer_(kitchen_appliance)'], 'def': 'a cooking utensil that can be used to cook food by steaming it', 'name': 'steamer_(kitchen_appliance)'}, {'frequency': 'f', 'id': 1028, 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'id': 1029, 'synset': 'stencil.n.01', 'synonyms': ['stencil'], 'def': 'a sheet of material (metal, plastic, etc.) that has been perforated with a pattern; ink or paint can pass through the perforations to create the printed pattern on the surface below', 'name': 'stencil'}, {'frequency': 'r', 'id': 1030, 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'id': 1031, 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'id': 1032, 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'id': 1033, 'synset': 'stew.n.02', 'synonyms': ['stew'], 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'id': 1034, 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'id': 1035, 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'c', 'id': 1036, 'synset': 'stocking.n.01', 'synonyms': ['stockings_(leg_wear)'], 'def': 'close-fitting hosiery to cover the foot and leg; come in matched pairs', 'name': 'stockings_(leg_wear)'}, {'frequency': 'f', 'id': 1037, 'synset': 'stool.n.01', 'synonyms': ['stool'], 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'id': 1038, 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'id': 1039, 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'id': 1040, 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'id': 1041, 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'id': 1042, 'synset': 'strap.n.01', 'synonyms': ['strap'], 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'id': 1043, 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'id': 1044, 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'id': 1045, 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'id': 1046, 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'def': 'a lamp supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'id': 1047, 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'id': 1048, 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'def': 'a pointed tool for writing or drawing or engraving', 'name': 'stylus'}, {'frequency': 'r', 'id': 1049, 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'id': 1050, 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'id': 1051, 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'c', 'id': 1052, 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'id': 1053, 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'id': 1054, 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'id': 1055, 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'r', 'id': 1056, 'synset': 'sunscreen.n.01', 'synonyms': ['sunscreen', 'sunblock'], 'def': 'a cream spread on the skin; contains a chemical to filter out ultraviolet light and so protect from sunburn', 'name': 'sunscreen'}, {'frequency': 'f', 'id': 1057, 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'id': 1058, 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'id': 1059, 'synset': 'swab.n.02', 'synonyms': ['mop'], 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'id': 1060, 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'id': 1061, 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'id': 1062, 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'id': 1063, 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'id': 1064, 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'id': 1065, 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 'c', 'id': 1066, 'synset': 'sword.n.01', 'synonyms': ['sword'], 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'id': 1067, 'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'id': 1068, 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'id': 1069, 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'id': 1070, 'synset': 'table.n.02', 'synonyms': ['table'], 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'id': 1071, 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'id': 1072, 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'id': 1073, 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'id': 1074, 'synset': 'taco.n.02', 'synonyms': ['taco'], 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'id': 1075, 'synset': 'tag.n.02', 'synonyms': ['tag'], 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'id': 1076, 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'id': 1077, 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'id': 1078, 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'c', 'id': 1079, 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'id': 1080, 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'c', 'id': 1081, 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'id': 1082, 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'id': 1083, 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'id': 1084, 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'id': 1085, 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'def': 'a cloth having a crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'id': 1086, 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'r', 'id': 1087, 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'id': 1088, 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'id': 1089, 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'c', 'id': 1090, 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'id': 1091, 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'id': 1092, 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'def': 'electronic device for communicating by voice over long distances', 'name': 'telephone'}, {'frequency': 'c', 'id': 1093, 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'id': 1094, 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'id': 1095, 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'id': 1096, 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'id': 1097, 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'id': 1098, 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'id': 1099, 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'id': 1100, 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'id': 1101, 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'id': 1102, 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'c', 'id': 1103, 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'def': 'a regulator for automatically regulating temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'id': 1104, 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'def': 'a small metal cap to protect the finger while sewing; can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'id': 1105, 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'id': 1106, 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'id': 1107, 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'id': 1108, 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'id': 1109, 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'id': 1110, 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'id': 1111, 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'r', 'id': 1112, 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'id': 1113, 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'id': 1114, 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'id': 1115, 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'c', 'id': 1116, 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'id': 1117, 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'id': 1118, 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'id': 1119, 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'c', 'id': 1120, 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'id': 1121, 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'id': 1122, 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'def': 'small brush; has long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'id': 1123, 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, {'frequency': 'c', 'id': 1124, 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'c', 'id': 1125, 'synset': 'top.n.09', 'synonyms': ['cover'], 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'id': 1126, 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'id': 1127, 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'id': 1128, 'synset': 'towel.n.01', 'synonyms': ['towel'], 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'id': 1129, 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'id': 1130, 'synset': 'toy.n.03', 'synonyms': ['toy'], 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'id': 1131, 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'id': 1132, 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'r', 'id': 1133, 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'c', 'id': 1134, 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'id': 1135, 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'id': 1136, 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'id': 1137, 'synset': 'tray.n.01', 'synonyms': ['tray'], 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'id': 1138, 'synset': 'tree_house.n.01', 'synonyms': ['tree_house'], 'def': '(NOT A TREE) a PLAYHOUSE built in the branches of a tree', 'name': 'tree_house'}, {'frequency': 'r', 'id': 1139, 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'id': 1140, 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'r', 'id': 1141, 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'def': 'a vehicle with three wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'c', 'id': 1142, 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'id': 1143, 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'id': 1144, 'synset': 'truck.n.01', 'synonyms': ['truck'], 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'id': 1145, 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'id': 1146, 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'id': 1147, 'synset': 'tub.n.02', 'synonyms': ['vat'], 'def': 'a large open vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'id': 1148, 'synset': 'turban.n.01', 'synonyms': ['turban'], 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'r', 'id': 1149, 'synset': 'turkey.n.01', 'synonyms': ['turkey_(bird)'], 'def': 'large gallinaceous bird with fan-shaped tail; widely domesticated for food', 'name': 'turkey_(bird)'}, {'frequency': 'c', 'id': 1150, 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'id': 1151, 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'id': 1152, 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'r', 'id': 1153, 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'r', 'id': 1154, 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'id': 1155, 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'c', 'id': 1156, 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'id': 1157, 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'c', 'id': 1158, 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'r', 'id': 1159, 'synset': 'urn.n.01', 'synonyms': ['urn'], 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'id': 1160, 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'c', 'id': 1161, 'synset': 'valve.n.03', 'synonyms': ['valve'], 'def': 'control consisting of a mechanical device for controlling the flow of a fluid', 'name': 'valve'}, {'frequency': 'f', 'id': 1162, 'synset': 'vase.n.01', 'synonyms': ['vase'], 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'id': 1163, 'synset': 'vending_machine.n.01', 'synonyms': ['vending_machine'], 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'id': 1164, 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'c', 'id': 1165, 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'id': 1166, 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'id': 1167, 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'id': 1168, 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'r', 'id': 1169, 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'id': 1170, 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'id': 1171, 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'id': 1172, 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'id': 1173, 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'id': 1174, 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'id': 1175, 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'id': 1176, 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'id': 1177, 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'c', 'id': 1178, 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'id': 1179, 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'id': 1180, 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'def': 'a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'id': 1181, 'synset': 'wasabi.n.02', 'synonyms': ['wasabi'], 'def': 'the thick green root of the wasabi plant that the Japanese use in cooking and that tastes like strong horseradish', 'name': 'wasabi'}, {'frequency': 'c', 'id': 1182, 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'id': 1183, 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'id': 1184, 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'id': 1185, 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'id': 1186, 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'id': 1187, 'synset': 'water_filter.n.01', 'synonyms': ['water_filter'], 'def': 'a filter to remove impurities from the water supply', 'name': 'water_filter'}, {'frequency': 'r', 'id': 1188, 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'r', 'id': 1189, 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'id': 1190, 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'id': 1191, 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'id': 1192, 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'id': 1193, 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'id': 1194, 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'c', 'id': 1195, 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'id': 1196, 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'id': 1197, 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'id': 1198, 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'def': 'a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'id': 1199, 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'def': 'a ring given to the bride and/or groom at the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'id': 1200, 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'id': 1201, 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'id': 1202, 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'id': 1203, 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'r', 'id': 1204, 'synset': 'whiskey.n.01', 'synonyms': ['whiskey'], 'def': 'a liquor made from fermented mash of grain', 'name': 'whiskey'}, {'frequency': 'r', 'id': 1205, 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'r', 'id': 1206, 'synset': 'wick.n.02', 'synonyms': ['wick'], 'def': 'a loosely woven cord in a candle or oil lamp that is lit on fire', 'name': 'wick'}, {'frequency': 'c', 'id': 1207, 'synset': 'wig.n.01', 'synonyms': ['wig'], 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'id': 1208, 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'id': 1209, 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'def': 'a mill that is powered by the wind', 'name': 'windmill'}, {'frequency': 'c', 'id': 1210, 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'id': 1211, 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'id': 1212, 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'id': 1213, 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'r', 'id': 1214, 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'id': 1215, 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'r', 'id': 1216, 'synset': 'wing_chair.n.01', 'synonyms': ['wing_chair'], 'def': 'easy chair having wings on each side of a high back', 'name': 'wing_chair'}, {'frequency': 'c', 'id': 1217, 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'id': 1218, 'synset': 'wok.n.01', 'synonyms': ['wok'], 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'id': 1219, 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'id': 1220, 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'id': 1221, 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 'name': 'wreath'}, {'frequency': 'c', 'id': 1222, 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'c', 'id': 1223, 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'id': 1224, 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'r', 'id': 1225, 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'r', 'id': 1226, 'synset': 'yak.n.02', 'synonyms': ['yak'], 'def': 'large long-haired wild ox of Tibet often domesticated', 'name': 'yak'}, {'frequency': 'c', 'id': 1227, 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'r', 'id': 1228, 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'id': 1229, 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'id': 1230, 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa
+# fmt: on
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py
new file mode 100644
index 0000000000000000000000000000000000000000..7374e6968bb006f5d8c49e75d9d3b31ea3d77d05
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py
@@ -0,0 +1,16 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Autogen with
+# with open("lvis_v1_val.json", "r") as f:
+# a = json.load(f)
+# c = a["categories"]
+# for x in c:
+# del x["image_count"]
+# del x["instance_count"]
+# LVIS_CATEGORIES = repr(c) + " # noqa"
+# with open("/tmp/lvis_categories.py", "wt") as f:
+# f.write(f"LVIS_CATEGORIES = {LVIS_CATEGORIES}")
+# Then paste the contents of that file below
+
+# fmt: off
+LVIS_CATEGORIES = [{'frequency': 'c', 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'id': 1, 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'id': 2, 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'id': 3, 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'f', 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'id': 4, 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'id': 5, 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'c', 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'id': 6, 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'synset': 'almond.n.02', 'synonyms': ['almond'], 'id': 7, 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'id': 8, 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'c', 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'id': 9, 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'id': 10, 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'id': 11, 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'synset': 'apple.n.01', 'synonyms': ['apple'], 'id': 12, 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'id': 13, 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'id': 14, 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'synset': 'apron.n.01', 'synonyms': ['apron'], 'id': 15, 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'id': 16, 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'r', 'synset': 'arctic.n.02', 'synonyms': ['arctic_(type_of_shoe)', 'galosh', 'golosh', 'rubber_(type_of_shoe)', 'gumshoe'], 'id': 17, 'def': 'a waterproof overshoe that protects shoes from water or snow', 'name': 'arctic_(type_of_shoe)'}, {'frequency': 'c', 'synset': 'armband.n.02', 'synonyms': ['armband'], 'id': 18, 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'id': 19, 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'id': 20, 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'id': 21, 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'id': 22, 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'id': 23, 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'id': 24, 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'id': 25, 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'id': 26, 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'f', 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'id': 27, 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'id': 28, 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'synset': 'awning.n.01', 'synonyms': ['awning'], 'id': 29, 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'id': 30, 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'r', 'synset': 'baboon.n.01', 'synonyms': ['baboon'], 'id': 31, 'def': 'large terrestrial monkeys having doglike muzzles', 'name': 'baboon'}, {'frequency': 'f', 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'id': 32, 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'id': 33, 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'id': 34, 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'id': 35, 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'id': 36, 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'id': 37, 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'id': 38, 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'id': 39, 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'id': 40, 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'synset': 'ball.n.06', 'synonyms': ['ball'], 'id': 41, 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'id': 42, 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'id': 43, 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'id': 44, 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'synset': 'banana.n.02', 'synonyms': ['banana'], 'id': 45, 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'c', 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'id': 46, 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'id': 47, 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'f', 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'id': 48, 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'id': 49, 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'id': 50, 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'id': 51, 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'synset': 'barge.n.01', 'synonyms': ['barge'], 'id': 52, 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'id': 53, 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'id': 54, 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'id': 55, 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'id': 56, 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'id': 57, 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'id': 58, 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'id': 59, 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'id': 60, 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'id': 61, 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'id': 62, 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'id': 63, 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'c', 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'id': 64, 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'id': 65, 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'id': 66, 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'id': 67, 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'id': 68, 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'id': 69, 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'synset': 'battery.n.02', 'synonyms': ['battery'], 'id': 70, 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'id': 71, 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'synset': 'bead.n.01', 'synonyms': ['bead'], 'id': 72, 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'c', 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'id': 73, 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'id': 74, 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'id': 75, 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'synset': 'bear.n.01', 'synonyms': ['bear'], 'id': 76, 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'synset': 'bed.n.01', 'synonyms': ['bed'], 'id': 77, 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'r', 'synset': 'bedpan.n.01', 'synonyms': ['bedpan'], 'id': 78, 'def': 'a shallow vessel used by a bedridden patient for defecation and urination', 'name': 'bedpan'}, {'frequency': 'f', 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'id': 79, 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'synset': 'beef.n.01', 'synonyms': ['cow'], 'id': 80, 'def': 'cattle/cow', 'name': 'cow'}, {'frequency': 'f', 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'id': 81, 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'id': 82, 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'id': 83, 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'id': 84, 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'id': 85, 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'synset': 'bell.n.01', 'synonyms': ['bell'], 'id': 86, 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'id': 87, 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'synset': 'belt.n.02', 'synonyms': ['belt'], 'id': 88, 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'id': 89, 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'synset': 'bench.n.01', 'synonyms': ['bench'], 'id': 90, 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'synset': 'beret.n.01', 'synonyms': ['beret'], 'id': 91, 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'synset': 'bib.n.02', 'synonyms': ['bib'], 'id': 92, 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'id': 93, 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'id': 94, 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'id': 95, 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'f', 'synset': 'billboard.n.01', 'synonyms': ['billboard'], 'id': 96, 'def': 'large outdoor signboard', 'name': 'billboard'}, {'frequency': 'c', 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'id': 97, 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'id': 98, 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'synset': 'bird.n.01', 'synonyms': ['bird'], 'id': 99, 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'c', 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'id': 100, 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'c', 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'id': 101, 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'id': 102, 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'id': 103, 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'id': 104, 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'id': 105, 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'id': 106, 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'id': 107, 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'synset': 'blackberry.n.01', 'synonyms': ['blackberry'], 'id': 108, 'def': 'large sweet black or very dark purple edible aggregate fruit', 'name': 'blackberry'}, {'frequency': 'f', 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'id': 109, 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'id': 110, 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'id': 111, 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'id': 112, 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'id': 113, 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'f', 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'id': 114, 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'f', 'synset': 'blouse.n.01', 'synonyms': ['blouse'], 'id': 115, 'def': 'a top worn by women', 'name': 'blouse'}, {'frequency': 'f', 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'id': 116, 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'id': 117, 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'id': 118, 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'r', 'synset': 'bob.n.05', 'synonyms': ['bob', 'bobber', 'bobfloat'], 'id': 119, 'def': 'a small float usually made of cork; attached to a fishing line', 'name': 'bob'}, {'frequency': 'c', 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'id': 120, 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'c', 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'id': 121, 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'id': 122, 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'id': 123, 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'id': 124, 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'id': 125, 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'id': 126, 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'synset': 'book.n.01', 'synonyms': ['book'], 'id': 127, 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'c', 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'id': 128, 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'id': 129, 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'id': 130, 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'id': 131, 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'synset': 'boot.n.01', 'synonyms': ['boot'], 'id': 132, 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'id': 133, 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'id': 134, 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'id': 135, 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'id': 136, 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'id': 137, 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'id': 138, 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'id': 139, 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'id': 140, 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'id': 141, 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'id': 142, 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'f', 'synset': 'box.n.01', 'synonyms': ['box'], 'id': 143, 'def': 'a (usually rectangular) container; may have a lid', 'name': 'box'}, {'frequency': 'r', 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'id': 144, 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'id': 145, 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'id': 146, 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'id': 147, 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'id': 148, 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'id': 149, 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'f', 'synset': 'bread.n.01', 'synonyms': ['bread'], 'id': 150, 'def': 'food made from dough of flour or meal and usually raised with yeast or baking powder and then baked', 'name': 'bread'}, {'frequency': 'r', 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'id': 151, 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'f', 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'id': 152, 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'id': 153, 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'f', 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'id': 154, 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'id': 155, 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'synset': 'broom.n.01', 'synonyms': ['broom'], 'id': 156, 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'id': 157, 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'id': 158, 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'id': 159, 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'id': 160, 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'id': 161, 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'synset': 'bull.n.11', 'synonyms': ['horned_cow'], 'id': 162, 'def': 'a cow with horns', 'name': 'bull'}, {'frequency': 'c', 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'id': 163, 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'id': 164, 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'id': 165, 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'id': 166, 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'id': 167, 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'id': 168, 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'f', 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'id': 169, 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'id': 170, 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'id': 171, 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'id': 172, 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'id': 173, 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'id': 174, 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'f', 'synset': 'butter.n.01', 'synonyms': ['butter'], 'id': 175, 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'id': 176, 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'synset': 'button.n.01', 'synonyms': ['button'], 'id': 177, 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'id': 178, 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'id': 179, 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'c', 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'id': 180, 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'id': 181, 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'id': 182, 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'synset': 'cake.n.03', 'synonyms': ['cake'], 'id': 183, 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'id': 184, 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'id': 185, 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'synset': 'calf.n.01', 'synonyms': ['calf'], 'id': 186, 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'id': 187, 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'synset': 'camel.n.01', 'synonyms': ['camel'], 'id': 188, 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'synset': 'camera.n.01', 'synonyms': ['camera'], 'id': 189, 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'id': 190, 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'id': 191, 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'id': 192, 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'id': 193, 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'f', 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'id': 194, 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'id': 195, 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'id': 196, 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'id': 197, 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'id': 198, 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'id': 199, 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'c', 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'id': 200, 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'c', 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'id': 201, 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'id': 202, 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'f', 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'id': 203, 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'id': 204, 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'c', 'synset': 'cape.n.02', 'synonyms': ['cape'], 'id': 205, 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'id': 206, 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'id': 207, 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'id': 208, 'def': 'a wheeled vehicle adapted to the rails of railroad (mark each individual railcar separately)', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'id': 209, 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'id': 210, 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'id': 211, 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'synset': 'card.n.03', 'synonyms': ['card'], 'id': 212, 'def': 'a rectangular piece of paper used to send messages (e.g. greetings or pictures)', 'name': 'card'}, {'frequency': 'c', 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'id': 213, 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'id': 214, 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'id': 215, 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'id': 216, 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'id': 217, 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'f', 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'id': 218, 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'synset': 'cart.n.01', 'synonyms': ['cart'], 'id': 219, 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'synset': 'carton.n.02', 'synonyms': ['carton'], 'id': 220, 'def': 'a container made of cardboard for holding food or drink', 'name': 'carton'}, {'frequency': 'c', 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'id': 221, 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'id': 222, 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'id': 223, 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'id': 224, 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'synset': 'cat.n.01', 'synonyms': ['cat'], 'id': 225, 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'f', 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'id': 226, 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'c', 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'id': 227, 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'id': 228, 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'f', 'synset': 'celery.n.01', 'synonyms': ['celery'], 'id': 229, 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'id': 230, 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'id': 231, 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 'chain_mail'}, {'frequency': 'f', 'synset': 'chair.n.01', 'synonyms': ['chair'], 'id': 232, 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'id': 233, 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'synset': 'chalice.n.01', 'synonyms': ['chalice'], 'id': 234, 'def': 'a bowl-shaped drinking vessel; especially the Eucharistic cup', 'name': 'chalice'}, {'frequency': 'f', 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'id': 235, 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'synset': 'chap.n.04', 'synonyms': ['chap'], 'id': 236, 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'id': 237, 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'id': 238, 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'id': 239, 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'id': 240, 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'c', 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'id': 241, 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'id': 242, 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'c', 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'id': 243, 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'id': 244, 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'id': 245, 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'id': 246, 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'id': 247, 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'id': 248, 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'id': 249, 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'id': 250, 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'id': 251, 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'id': 252, 'def': 'shirt collar, animal collar, or tight-fitting necklace', 'name': 'choker'}, {'frequency': 'f', 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'id': 253, 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'f', 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'id': 254, 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'id': 255, 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'synset': 'chute.n.02', 'synonyms': ['slide'], 'id': 256, 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'id': 257, 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'id': 258, 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'f', 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'id': 259, 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'id': 260, 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'id': 261, 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'id': 262, 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'c', 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'id': 263, 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'id': 264, 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'synset': 'cleat.n.02', 'synonyms': ['cleat_(for_securing_rope)'], 'id': 265, 'def': 'a fastener (usually with two projecting horns) around which a rope can be secured', 'name': 'cleat_(for_securing_rope)'}, {'frequency': 'r', 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'id': 266, 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'synset': 'clip.n.03', 'synonyms': ['clip'], 'id': 267, 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'id': 268, 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'r', 'synset': 'clipper.n.03', 'synonyms': ['clippers_(for_plants)'], 'id': 269, 'def': 'shears for cutting grass or shrubbery (often used in the plural)', 'name': 'clippers_(for_plants)'}, {'frequency': 'r', 'synset': 'cloak.n.02', 'synonyms': ['cloak'], 'id': 270, 'def': 'a loose outer garment', 'name': 'cloak'}, {'frequency': 'f', 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'id': 271, 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'synset': 'clock_tower.n.01', 'synonyms': ['clock_tower'], 'id': 272, 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'id': 273, 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'id': 274, 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'id': 275, 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'id': 276, 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'synset': 'coat.n.01', 'synonyms': ['coat'], 'id': 277, 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'id': 278, 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'c', 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'id': 279, 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'id': 280, 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'r', 'synset': 'cockroach.n.01', 'synonyms': ['cockroach'], 'id': 281, 'def': 'any of numerous chiefly nocturnal insects; some are domestic pests', 'name': 'cockroach'}, {'frequency': 'r', 'synset': 'cocoa.n.01', 'synonyms': ['cocoa_(beverage)', 'hot_chocolate_(beverage)', 'drinking_chocolate'], 'id': 282, 'def': 'a beverage made from cocoa powder and milk and sugar; usually drunk hot', 'name': 'cocoa_(beverage)'}, {'frequency': 'c', 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'id': 283, 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'f', 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'id': 284, 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'id': 285, 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'id': 286, 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'synset': 'coil.n.05', 'synonyms': ['coil'], 'id': 287, 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'synset': 'coin.n.01', 'synonyms': ['coin'], 'id': 288, 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'c', 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'id': 289, 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'id': 290, 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'id': 291, 'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'id': 292, 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'id': 293, 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'id': 294, 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'r', 'synset': 'compass.n.01', 'synonyms': ['compass'], 'id': 295, 'def': 'navigational instrument for finding directions', 'name': 'compass'}, {'frequency': 'f', 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'id': 296, 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'f', 'synset': 'condiment.n.01', 'synonyms': ['condiment'], 'id': 297, 'def': 'a preparation (a sauce or relish or spice) to enhance flavor or enjoyment', 'name': 'condiment'}, {'frequency': 'f', 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'id': 298, 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'id': 299, 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'id': 300, 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'id': 301, 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'r', 'synset': 'cooker.n.01', 'synonyms': ['cooker'], 'id': 302, 'def': 'a utensil for cooking', 'name': 'cooker'}, {'frequency': 'f', 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'id': 303, 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'id': 304, 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'id': 305, 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'f', 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'id': 306, 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'id': 307, 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'c', 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'id': 308, 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'f', 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'id': 309, 'def': 'ears or kernels of corn that can be prepared and served for human food (only mark individual ears or kernels)', 'name': 'edible_corn'}, {'frequency': 'r', 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'id': 310, 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'id': 311, 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'id': 312, 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'id': 313, 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'c', 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'id': 314, 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'c', 'synset': 'costume.n.04', 'synonyms': ['costume'], 'id': 315, 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'id': 316, 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'id': 317, 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'c', 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'id': 318, 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'id': 319, 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'c', 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'id': 320, 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'r', 'synset': 'crab.n.05', 'synonyms': ['crabmeat'], 'id': 321, 'def': 'the edible flesh of any of various crabs', 'name': 'crabmeat'}, {'frequency': 'c', 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'id': 322, 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'id': 323, 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'synset': 'crate.n.01', 'synonyms': ['crate'], 'id': 324, 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'c', 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'id': 325, 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'id': 326, 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'c', 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'id': 327, 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'id': 328, 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'id': 329, 'def': 'an earthen jar (made of baked clay) or a modern electric crockpot', 'name': 'crock_pot'}, {'frequency': 'f', 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'id': 330, 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'synset': 'crouton.n.01', 'synonyms': ['crouton'], 'id': 331, 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'c', 'synset': 'crow.n.01', 'synonyms': ['crow'], 'id': 332, 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'r', 'synset': 'crowbar.n.01', 'synonyms': ['crowbar', 'wrecking_bar', 'pry_bar'], 'id': 333, 'def': 'a heavy iron lever with one end forged into a wedge', 'name': 'crowbar'}, {'frequency': 'c', 'synset': 'crown.n.04', 'synonyms': ['crown'], 'id': 334, 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'id': 335, 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'id': 336, 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'id': 337, 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'f', 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'id': 338, 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'c', 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'id': 339, 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'id': 340, 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'c', 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'id': 341, 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'id': 342, 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'id': 343, 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'synset': 'cup.n.01', 'synonyms': ['cup'], 'id': 344, 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'id': 345, 'def': 'a metal award or cup-shaped vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'f', 'synset': 'cupboard.n.01', 'synonyms': ['cupboard', 'closet'], 'id': 346, 'def': 'a small room (or recess) or cabinet used for storage space', 'name': 'cupboard'}, {'frequency': 'f', 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'id': 347, 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'id': 348, 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'id': 349, 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'id': 350, 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, {'frequency': 'f', 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'id': 351, 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'id': 352, 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'id': 353, 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'id': 354, 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'synset': 'dalmatian.n.02', 'synonyms': ['dalmatian'], 'id': 355, 'def': 'a large breed having a smooth white coat with black or brown spots', 'name': 'dalmatian'}, {'frequency': 'c', 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'id': 356, 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'id': 357, 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'id': 358, 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'id': 359, 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'id': 360, 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'synset': 'desk.n.01', 'synonyms': ['desk'], 'id': 361, 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'id': 362, 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'id': 363, 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'id': 364, 'def': 'yearly planner book', 'name': 'diary'}, {'frequency': 'r', 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'id': 365, 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'id': 366, 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'id': 367, 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'id': 368, 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'f', 'synset': 'dish.n.01', 'synonyms': ['dish'], 'id': 369, 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'id': 370, 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, {'frequency': 'c', 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'id': 371, 'def': 'a cloth for washing dishes or cleaning in general', 'name': 'dishrag'}, {'frequency': 'f', 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'id': 372, 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'id': 373, 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid', 'dishsoap'], 'id': 374, 'def': 'dishsoap or dish detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'f', 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'id': 375, 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'r', 'synset': 'diving_board.n.01', 'synonyms': ['diving_board'], 'id': 376, 'def': 'a springboard from which swimmers can dive', 'name': 'diving_board'}, {'frequency': 'f', 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'id': 377, 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'synset': 'dog.n.01', 'synonyms': ['dog'], 'id': 378, 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'id': 379, 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'f', 'synset': 'doll.n.01', 'synonyms': ['doll'], 'id': 380, 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'id': 381, 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'synset': 'dollhouse.n.01', 'synonyms': ['dollhouse', "doll's_house"], 'id': 382, 'def': "a house so small that it is likened to a child's plaything", 'name': 'dollhouse'}, {'frequency': 'c', 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'id': 383, 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'id': 384, 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'f', 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'id': 385, 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'id': 386, 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'id': 387, 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'synset': 'dove.n.01', 'synonyms': ['dove'], 'id': 388, 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'id': 389, 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'id': 390, 'def': 'a boxlike container in a piece of furniture; made so as to slide in and out', 'name': 'drawer'}, {'frequency': 'c', 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'id': 391, 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'id': 392, 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'id': 393, 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'f', 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'id': 394, 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'f', 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'id': 395, 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'synset': 'drill.n.01', 'synonyms': ['drill'], 'id': 396, 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'synset': 'drone.n.04', 'synonyms': ['drone'], 'id': 397, 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'id': 398, 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'id': 399, 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'id': 400, 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'synset': 'duck.n.01', 'synonyms': ['duck'], 'id': 401, 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'c', 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'id': 402, 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'id': 403, 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'id': 404, 'def': 'a large cylindrical bag of heavy cloth (does not include suitcases)', 'name': 'duffel_bag'}, {'frequency': 'r', 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'id': 405, 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'id': 406, 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'id': 407, 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'c', 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'id': 408, 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'id': 409, 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'id': 410, 'def': 'a soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'synset': 'earring.n.01', 'synonyms': ['earring'], 'id': 411, 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'synset': 'easel.n.01', 'synonyms': ['easel'], 'id': 412, 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'id': 413, 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'synset': 'eel.n.01', 'synonyms': ['eel'], 'id': 414, 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'id': 415, 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'id': 416, 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'id': 417, 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'id': 418, 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'id': 419, 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'id': 420, 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'id': 421, 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'id': 422, 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'c', 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'id': 423, 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'id': 424, 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'id': 425, 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'id': 426, 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'id': 427, 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'id': 428, 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'synset': 'fan.n.01', 'synonyms': ['fan'], 'id': 429, 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'id': 430, 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'id': 431, 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'id': 432, 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'id': 433, 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'c', 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'id': 434, 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'id': 435, 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'id': 436, 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'id': 437, 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'id': 438, 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'id': 439, 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'id': 440, 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'f', 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'id': 441, 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'f', 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'id': 442, 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'id': 443, 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'id': 444, 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'id': 445, 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'r', 'synset': 'first-aid_kit.n.01', 'synonyms': ['first-aid_kit'], 'id': 446, 'def': 'kit consisting of a set of bandages and medicines for giving first aid', 'name': 'first-aid_kit'}, {'frequency': 'f', 'synset': 'fish.n.01', 'synonyms': ['fish'], 'id': 447, 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'c', 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'id': 448, 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'id': 449, 'def': 'a transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'c', 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'id': 450, 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'synset': 'flag.n.01', 'synonyms': ['flag'], 'id': 451, 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'id': 452, 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'id': 453, 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'id': 454, 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'c', 'synset': 'flap.n.01', 'synonyms': ['flap'], 'id': 455, 'def': 'any broad thin covering attached at one edge, such as a mud flap next to a wheel or a flap on an airplane wing', 'name': 'flap'}, {'frequency': 'r', 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'id': 456, 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'id': 457, 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'id': 458, 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'id': 459, 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'id': 460, 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'id': 461, 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'id': 462, 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'c', 'synset': 'foal.n.01', 'synonyms': ['foal'], 'id': 463, 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'id': 464, 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'id': 465, 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'id': 466, 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'id': 467, 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'id': 468, 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'synset': 'fork.n.01', 'synonyms': ['fork'], 'id': 469, 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'c', 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'id': 470, 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'c', 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'id': 471, 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'c', 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'id': 472, 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'id': 473, 'def': 'anything that freshens air by removing or covering odor', 'name': 'freshener'}, {'frequency': 'f', 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'id': 474, 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'id': 475, 'def': 'a tailless stout-bodied amphibians with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'id': 476, 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'f', 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'id': 477, 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'id': 478, 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'id': 479, 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'r', 'synset': 'futon.n.01', 'synonyms': ['futon'], 'id': 480, 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'id': 481, 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'id': 482, 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'id': 483, 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'id': 484, 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'id': 485, 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'id': 486, 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'id': 487, 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'id': 488, 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'c', 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'id': 489, 'def': 'small swift graceful antelope of Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'id': 490, 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'id': 491, 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'r', 'synset': 'generator.n.02', 'synonyms': ['generator'], 'id': 492, 'def': 'engine that converts mechanical energy into electrical energy by electromagnetic induction', 'name': 'generator'}, {'frequency': 'c', 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'id': 493, 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'id': 494, 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'id': 495, 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'id': 496, 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'id': 497, 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'id': 498, 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'synset': 'globe.n.03', 'synonyms': ['globe'], 'id': 499, 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'synset': 'glove.n.02', 'synonyms': ['glove'], 'id': 500, 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'synset': 'goat.n.01', 'synonyms': ['goat'], 'id': 501, 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'id': 502, 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'id': 503, 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'c', 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'id': 504, 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'id': 505, 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'id': 506, 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'synset': 'goose.n.01', 'synonyms': ['goose'], 'id': 507, 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'id': 508, 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 'id': 509, 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'f', 'synset': 'grape.n.01', 'synonyms': ['grape'], 'id': 510, 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'c', 'synset': 'grater.n.01', 'synonyms': ['grater'], 'id': 511, 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'id': 512, 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'id': 513, 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'f', 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'id': 514, 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'f', 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'id': 515, 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'id': 516, 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'f', 'synset': 'grill.n.02', 'synonyms': ['grill', 'grille', 'grillwork', 'radiator_grille'], 'id': 517, 'def': 'a framework of metal bars used as a partition or a grate', 'name': 'grill'}, {'frequency': 'r', 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'id': 518, 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'id': 519, 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'id': 520, 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'f', 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'id': 521, 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'id': 522, 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'synset': 'gun.n.01', 'synonyms': ['gun'], 'id': 523, 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'f', 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'id': 524, 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'id': 525, 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'id': 526, 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'r', 'synset': 'halter.n.03', 'synonyms': ['halter_top'], 'id': 527, 'def': "a woman's top that fastens behind the back and neck leaving the back and arms uncovered", 'name': 'halter_top'}, {'frequency': 'f', 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'id': 528, 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'id': 529, 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'id': 530, 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'c', 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'id': 531, 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'id': 532, 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'c', 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'id': 533, 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'f', 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'id': 534, 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'id': 535, 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'id': 536, 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'id': 537, 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'id': 538, 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'id': 539, 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'id': 540, 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'id': 541, 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'id': 542, 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'id': 543, 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'synset': 'hat.n.01', 'synonyms': ['hat'], 'id': 544, 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'id': 545, 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'c', 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'id': 546, 'def': 'a garment that covers the head OR face', 'name': 'veil'}, {'frequency': 'f', 'synset': 'headband.n.01', 'synonyms': ['headband'], 'id': 547, 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'id': 548, 'def': 'a vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'id': 549, 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'id': 550, 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'synset': 'headset.n.01', 'synonyms': ['headset'], 'id': 551, 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'id': 552, 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'c', 'synset': 'heart.n.02', 'synonyms': ['heart'], 'id': 553, 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'id': 554, 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'id': 555, 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'id': 556, 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'synset': 'heron.n.02', 'synonyms': ['heron'], 'id': 557, 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'id': 558, 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'id': 559, 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'id': 560, 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'id': 561, 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'id': 562, 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'id': 563, 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'synset': 'honey.n.01', 'synonyms': ['honey'], 'id': 564, 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'id': 565, 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'synset': 'hook.n.05', 'synonyms': ['hook'], 'id': 566, 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'r', 'synset': 'hookah.n.01', 'synonyms': ['hookah', 'narghile', 'nargileh', 'sheesha', 'shisha', 'water_pipe'], 'id': 567, 'def': 'a tobacco pipe with a long flexible tube connected to a container where the smoke is cooled by passing through water', 'name': 'hookah'}, {'frequency': 'r', 'synset': 'hornet.n.01', 'synonyms': ['hornet'], 'id': 568, 'def': 'large stinging wasp', 'name': 'hornet'}, {'frequency': 'f', 'synset': 'horse.n.01', 'synonyms': ['horse'], 'id': 569, 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'id': 570, 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'id': 571, 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'id': 572, 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'id': 573, 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'id': 574, 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'id': 575, 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'c', 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'id': 576, 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'id': 577, 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'f', 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'id': 578, 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'id': 579, 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'id': 580, 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'id': 581, 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'id': 582, 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'id': 583, 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'c', 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'id': 584, 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'id': 585, 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'f', 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'id': 586, 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 'smoothing_iron_(for_clothing)'], 'id': 587, 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'c', 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'id': 588, 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'id': 589, 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'c', 'synset': 'jam.n.01', 'synonyms': ['jam'], 'id': 590, 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'synset': 'jar.n.01', 'synonyms': ['jar'], 'id': 591, 'def': 'a vessel (usually cylindrical) with a wide mouth and without handles', 'name': 'jar'}, {'frequency': 'f', 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'id': 592, 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'id': 593, 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'id': 594, 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'id': 595, 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'id': 596, 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'r', 'synset': 'jewel.n.01', 'synonyms': ['jewel', 'gem', 'precious_stone'], 'id': 597, 'def': 'a precious or semiprecious stone incorporated into a piece of jewelry', 'name': 'jewel'}, {'frequency': 'c', 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'id': 598, 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'id': 599, 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'c', 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'id': 600, 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'id': 601, 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'synset': 'keg.n.02', 'synonyms': ['keg'], 'id': 602, 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'id': 603, 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'id': 604, 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'synset': 'key.n.01', 'synonyms': ['key'], 'id': 605, 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'id': 606, 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'c', 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'id': 607, 'def': 'a knee-length pleated tartan skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'id': 608, 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'id': 609, 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'r', 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'id': 610, 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'synset': 'kite.n.03', 'synonyms': ['kite'], 'id': 611, 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'id': 612, 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'id': 613, 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'id': 614, 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'synset': 'knife.n.01', 'synonyms': ['knife'], 'id': 615, 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'id': 616, 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'synset': 'knob.n.02', 'synonyms': ['knob'], 'id': 617, 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'id': 618, 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'id': 619, 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'id': 620, 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'id': 621, 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'id': 622, 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'c', 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'id': 623, 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'f', 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'id': 624, 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'id': 625, 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'id': 626, 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'id': 627, 'def': 'a metal post supporting an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'id': 628, 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'id': 629, 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'id': 630, 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'id': 631, 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'id': 632, 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'f', 'synset': 'latch.n.02', 'synonyms': ['latch'], 'id': 633, 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'id': 634, 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'synset': 'leather.n.01', 'synonyms': ['leather'], 'id': 635, 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'id': 636, 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'id': 637, 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'r', 'synset': 'legume.n.02', 'synonyms': ['legume'], 'id': 638, 'def': 'the fruit or seed of bean or pea plants', 'name': 'legume'}, {'frequency': 'f', 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'id': 639, 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'id': 640, 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'id': 641, 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'id': 642, 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'id': 643, 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'id': 644, 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'id': 645, 'def': 'lightblub/source of light', 'name': 'lightbulb'}, {'frequency': 'r', 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'id': 646, 'def': 'a metallic conductor that is attached to a high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'f', 'synset': 'lime.n.06', 'synonyms': ['lime'], 'id': 647, 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'id': 648, 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'c', 'synset': 'lion.n.01', 'synonyms': ['lion'], 'id': 649, 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'id': 650, 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'r', 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'id': 651, 'def': 'liquor or beer', 'name': 'liquor'}, {'frequency': 'c', 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'id': 652, 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'f', 'synset': 'log.n.01', 'synonyms': ['log'], 'id': 653, 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'id': 654, 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'f', 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'id': 655, 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'id': 656, 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'id': 657, 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'id': 658, 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'id': 659, 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'c', 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'id': 660, 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'f', 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'id': 661, 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'synset': 'mallard.n.01', 'synonyms': ['mallard'], 'id': 662, 'def': 'wild dabbling duck from which domestic ducks are descended', 'name': 'mallard'}, {'frequency': 'r', 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'id': 663, 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'id': 664, 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'r', 'synset': 'manatee.n.01', 'synonyms': ['manatee'], 'id': 665, 'def': 'sirenian mammal of tropical coastal waters of America', 'name': 'manatee'}, {'frequency': 'c', 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'id': 666, 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'id': 667, 'def': 'a container (usually in a barn or stable) from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'id': 668, 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'f', 'synset': 'map.n.01', 'synonyms': ['map'], 'id': 669, 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'f', 'synset': 'marker.n.03', 'synonyms': ['marker'], 'id': 670, 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'synset': 'martini.n.01', 'synonyms': ['martini'], 'id': 671, 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'id': 672, 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'id': 673, 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'synset': 'masher.n.02', 'synonyms': ['masher'], 'id': 674, 'def': 'a kitchen utensil used for mashing (e.g. potatoes)', 'name': 'masher'}, {'frequency': 'f', 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'id': 675, 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'synset': 'mast.n.01', 'synonyms': ['mast'], 'id': 676, 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'id': 677, 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'id': 678, 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'id': 679, 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'id': 680, 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'id': 681, 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'id': 682, 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'id': 683, 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'c', 'synset': 'melon.n.01', 'synonyms': ['melon'], 'id': 684, 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'id': 685, 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'id': 686, 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'id': 687, 'def': 'kitchen appliance that cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'id': 688, 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'f', 'synset': 'milk.n.01', 'synonyms': ['milk'], 'id': 689, 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'r', 'synset': 'milk_can.n.01', 'synonyms': ['milk_can'], 'id': 690, 'def': 'can for transporting milk', 'name': 'milk_can'}, {'frequency': 'r', 'synset': 'milkshake.n.01', 'synonyms': ['milkshake'], 'id': 691, 'def': 'frothy drink of milk and flavoring and sometimes fruit or ice cream', 'name': 'milkshake'}, {'frequency': 'f', 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'id': 692, 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'id': 693, 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'id': 694, 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'id': 695, 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'id': 696, 'def': 'a kitchen utensil that is used for mixing foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'synset': 'money.n.03', 'synonyms': ['money'], 'id': 697, 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'id': 698, 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'id': 699, 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'synset': 'motor.n.01', 'synonyms': ['motor'], 'id': 700, 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'id': 701, 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'id': 702, 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'f', 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'id': 703, 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'id': 704, 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'f', 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'id': 705, 'def': 'a computer input device that controls an on-screen pointer (does not include trackpads / touchpads)', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'id': 706, 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 'mousepad'}, {'frequency': 'c', 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'id': 707, 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'synset': 'mug.n.04', 'synonyms': ['mug'], 'id': 708, 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'id': 709, 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'id': 710, 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'c', 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'id': 711, 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'id': 712, 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'f', 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'id': 713, 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'id': 714, 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'id': 715, 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'id': 716, 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'c', 'synset': 'needle.n.03', 'synonyms': ['needle'], 'id': 717, 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'synset': 'nest.n.01', 'synonyms': ['nest'], 'id': 718, 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'f', 'synset': 'newspaper.n.01', 'synonyms': ['newspaper', 'paper_(newspaper)'], 'id': 719, 'def': 'a daily or weekly publication on folded sheets containing news, articles, and advertisements', 'name': 'newspaper'}, {'frequency': 'c', 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'id': 720, 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'id': 721, 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'id': 722, 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'c', 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'id': 723, 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'id': 724, 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'synset': 'notepad.n.01', 'synonyms': ['notepad'], 'id': 725, 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'f', 'synset': 'nut.n.03', 'synonyms': ['nut'], 'id': 726, 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'id': 727, 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'f', 'synset': 'oar.n.01', 'synonyms': ['oar'], 'id': 728, 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'id': 729, 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'id': 730, 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'id': 731, 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'id': 732, 'def': 'oil from olives', 'name': 'olive_oil'}, {'frequency': 'r', 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'id': 733, 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'synset': 'onion.n.01', 'synonyms': ['onion'], 'id': 734, 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'id': 735, 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'id': 736, 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'c', 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'id': 737, 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'f', 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'id': 738, 'def': 'a thick standalone cushion used as a seat or footrest, often next to a chair', 'name': 'ottoman'}, {'frequency': 'f', 'synset': 'oven.n.01', 'synonyms': ['oven'], 'id': 739, 'def': 'kitchen appliance used for baking or roasting', 'name': 'oven'}, {'frequency': 'c', 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'id': 740, 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'synset': 'owl.n.01', 'synonyms': ['owl'], 'id': 741, 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'synset': 'packet.n.03', 'synonyms': ['packet'], 'id': 742, 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'id': 743, 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'synset': 'pad.n.04', 'synonyms': ['pad'], 'id': 744, 'def': 'mostly arm/knee pads labeled', 'name': 'pad'}, {'frequency': 'f', 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'id': 745, 'def': 'a short light oar used without an oarlock to propel a canoe or small boat', 'name': 'paddle'}, {'frequency': 'c', 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'id': 746, 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'c', 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'id': 747, 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'synset': 'painting.n.01', 'synonyms': ['painting'], 'id': 748, 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'f', 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'id': 749, 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'id': 750, 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'id': 751, 'def': 'cooking utensil consisting of a wide metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'id': 752, 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'id': 753, 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'id': 754, 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'id': 755, 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'f', 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'id': 756, 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'id': 757, 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'id': 758, 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'id': 759, 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'id': 760, 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'c', 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'id': 761, 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'id': 762, 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'c', 'synset': 'parasol.n.01', 'synonyms': ['parasol', 'sunshade'], 'id': 763, 'def': 'a handheld collapsible source of shade', 'name': 'parasol'}, {'frequency': 'r', 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'id': 764, 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'c', 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'id': 765, 'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'id': 766, 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'id': 767, 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'id': 768, 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'id': 769, 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'c', 'synset': 'passport.n.02', 'synonyms': ['passport'], 'id': 770, 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country', 'name': 'passport'}, {'frequency': 'f', 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'id': 771, 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'id': 772, 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'id': 773, 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'synset': 'peach.n.03', 'synonyms': ['peach'], 'id': 774, 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'id': 775, 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'f', 'synset': 'pear.n.01', 'synonyms': ['pear'], 'id': 776, 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'c', 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'id': 777, 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'synset': 'peg.n.04', 'synonyms': ['wooden_leg', 'pegleg'], 'id': 778, 'def': 'a prosthesis that replaces a missing leg', 'name': 'wooden_leg'}, {'frequency': 'r', 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'id': 779, 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'id': 780, 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'synset': 'pen.n.01', 'synonyms': ['pen'], 'id': 781, 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'f', 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'id': 782, 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'id': 783, 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'id': 784, 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'synset': 'pendulum.n.01', 'synonyms': ['pendulum'], 'id': 785, 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'id': 786, 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'id': 787, 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'id': 788, 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'f', 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'id': 789, 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'id': 790, 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'id': 791, 'def': 'a toiletry that emits and diffuses a fragrant odor', 'name': 'perfume'}, {'frequency': 'r', 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'id': 792, 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'synset': 'person.n.01', 'synonyms': ['person', 'baby', 'child', 'boy', 'girl', 'man', 'woman', 'human'], 'id': 793, 'def': 'a human being', 'name': 'person'}, {'frequency': 'c', 'synset': 'pet.n.01', 'synonyms': ['pet'], 'id': 794, 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'c', 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'id': 795, 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'id': 796, 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'id': 797, 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'f', 'synset': 'piano.n.01', 'synonyms': ['piano'], 'id': 798, 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'id': 799, 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'id': 800, 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'synset': 'pie.n.01', 'synonyms': ['pie'], 'id': 801, 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'id': 802, 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'id': 803, 'def': "a child's coin bank (often shaped like a pig)", 'name': 'piggy_bank'}, {'frequency': 'f', 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'id': 804, 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'id': 805, 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'id': 806, 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'id': 807, 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'id': 808, 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'id': 809, 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'id': 810, 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'id': 811, 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'id': 812, 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'c', 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'id': 813, 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'id': 814, 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'id': 815, 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'id': 816, 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'id': 817, 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'synset': 'plate.n.04', 'synonyms': ['plate'], 'id': 818, 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'synset': 'platter.n.01', 'synonyms': ['platter'], 'id': 819, 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'id': 820, 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'id': 821, 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'id': 822, 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'synset': 'plume.n.02', 'synonyms': ['plume'], 'id': 823, 'def': 'a feather or cluster of feathers worn as an ornament', 'name': 'plume'}, {'frequency': 'r', 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'id': 824, 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'id': 825, 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'id': 826, 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'id': 827, 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'f', 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'id': 828, 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'id': 829, 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'synset': 'pony.n.05', 'synonyms': ['pony'], 'id': 830, 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'id': 831, 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'id': 832, 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'c', 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'id': 833, 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'id': 834, 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'id': 835, 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'synset': 'pot.n.01', 'synonyms': ['pot'], 'id': 836, 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'id': 837, 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'synset': 'potato.n.01', 'synonyms': ['potato'], 'id': 838, 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'id': 839, 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'id': 840, 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'id': 841, 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'c', 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'id': 842, 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'id': 843, 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'c', 'synset': 'pretzel.n.01', 'synonyms': ['pretzel'], 'id': 844, 'def': 'glazed and salted cracker typically in the shape of a loose knot', 'name': 'pretzel'}, {'frequency': 'f', 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'id': 845, 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'id': 846, 'def': 'a weapon that is forcibly thrown or projected at a targets', 'name': 'projectile_(weapon)'}, {'frequency': 'c', 'synset': 'projector.n.02', 'synonyms': ['projector'], 'id': 847, 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'id': 848, 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'synset': 'prune.n.01', 'synonyms': ['prune'], 'id': 849, 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'id': 850, 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'id': 851, 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'id': 852, 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'id': 853, 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'id': 854, 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'id': 855, 'def': 'a tool for making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'id': 856, 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'c', 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'id': 857, 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'id': 858, 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'id': 859, 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'id': 860, 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'id': 861, 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'id': 862, 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'id': 863, 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'synset': 'radar.n.01', 'synonyms': ['radar'], 'id': 864, 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'f', 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'id': 865, 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, {'frequency': 'c', 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'id': 866, 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'id': 867, 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'synset': 'raft.n.01', 'synonyms': ['raft'], 'id': 868, 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'id': 869, 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'id': 870, 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'id': 871, 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'id': 872, 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'synset': 'rat.n.01', 'synonyms': ['rat'], 'id': 873, 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'id': 874, 'def': 'a blade that has very sharp edge', 'name': 'razorblade'}, {'frequency': 'c', 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'id': 875, 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'id': 876, 'def': 'vehicle mirror (side or rearview)', 'name': 'rearview_mirror'}, {'frequency': 'c', 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'id': 877, 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'id': 878, 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'c', 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'id': 879, 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'f', 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'id': 880, 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'id': 881, 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'id': 882, 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'id': 883, 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'c', 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'id': 884, 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'synset': 'ring.n.08', 'synonyms': ['ring'], 'id': 885, 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'id': 886, 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'id': 887, 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'synset': 'robe.n.01', 'synonyms': ['robe'], 'id': 888, 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'id': 889, 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'synset': 'rodent.n.01', 'synonyms': ['rodent'], 'id': 890, 'def': 'relatively small placental mammals having a single pair of constantly growing incisor teeth specialized for gnawing', 'name': 'rodent'}, {'frequency': 'r', 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'id': 891, 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'id': 892, 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'id': 893, 'def': 'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'id': 894, 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'id': 895, 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'id': 896, 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'id': 897, 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'id': 898, 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'id': 899, 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'id': 900, 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'id': 901, 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'id': 902, 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'f', 'synset': 'sail.n.01', 'synonyms': ['sail'], 'id': 903, 'def': 'a large piece of fabric by means of which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'f', 'synset': 'salad.n.01', 'synonyms': ['salad'], 'id': 904, 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'id': 905, 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'c', 'synset': 'salami.n.01', 'synonyms': ['salami'], 'id': 906, 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'c', 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'id': 907, 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'id': 908, 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'c', 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'id': 909, 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'id': 910, 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'id': 911, 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, {'frequency': 'f', 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'id': 912, 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'id': 913, 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'id': 914, 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'id': 915, 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'id': 916, 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'id': 917, 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'id': 918, 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'id': 919, 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'id': 920, 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'id': 921, 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'id': 922, 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, {'frequency': 'f', 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'id': 923, 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'f', 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'id': 924, 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'r', 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'id': 925, 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'c', 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'id': 926, 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'f', 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'id': 927, 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'id': 928, 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'c', 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'id': 929, 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'c', 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'id': 930, 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'id': 931, 'def': 'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'id': 932, 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'c', 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'id': 933, 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'c', 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'id': 934, 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'id': 935, 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'c', 'synset': 'shark.n.01', 'synonyms': ['shark'], 'id': 936, 'def': 'typically large carnivorous fishes with sharpe teeth', 'name': 'shark'}, {'frequency': 'r', 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'id': 937, 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'id': 938, 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'synset': 'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'id': 939, 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'id': 940, 'def': 'toiletry consisting that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'id': 941, 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'synset': 'shears.n.01', 'synonyms': ['shears'], 'id': 942, 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'id': 943, 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'id': 944, 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'id': 945, 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'c', 'synset': 'shield.n.02', 'synonyms': ['shield'], 'id': 946, 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'id': 947, 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'id': 948, 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'f', 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'id': 949, 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'id': 950, 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'synset': 'short_pants.n.01', 'synonyms': ['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'id': 951, 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'id': 952, 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'f', 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'id': 953, 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'id': 954, 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'id': 955, 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'r', 'synset': 'shower_cap.n.01', 'synonyms': ['shower_cap'], 'id': 956, 'def': 'a tight cap worn to keep hair dry while showering', 'name': 'shower_cap'}, {'frequency': 'f', 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'id': 957, 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'id': 958, 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'f', 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'id': 959, 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'synset': 'silo.n.01', 'synonyms': ['silo'], 'id': 960, 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'synset': 'sink.n.01', 'synonyms': ['sink'], 'id': 961, 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'id': 962, 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'id': 963, 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'synset': 'ski.n.01', 'synonyms': ['ski'], 'id': 964, 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'id': 965, 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'id': 966, 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'id': 967, 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'id': 968, 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'r', 'synset': 'skullcap.n.01', 'synonyms': ['skullcap'], 'id': 969, 'def': 'rounded brimless cap fitting the crown of the head', 'name': 'skullcap'}, {'frequency': 'c', 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'id': 970, 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'synset': 'sleeping_bag.n.01', 'synonyms': ['sleeping_bag'], 'id': 971, 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'id': 972, 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'id': 973, 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'id': 974, 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'id': 975, 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'id': 976, 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'id': 977, 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'id': 978, 'def': 'tracked vehicle for travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'synset': 'soap.n.01', 'synonyms': ['soap'], 'id': 979, 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'id': 980, 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'synset': 'sock.n.01', 'synonyms': ['sock'], 'id': 981, 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'f', 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'id': 982, 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'synset': 'softball.n.01', 'synonyms': ['softball'], 'id': 983, 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'id': 984, 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'id': 985, 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'f', 'synset': 'soup.n.01', 'synonyms': ['soup'], 'id': 986, 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'id': 987, 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'id': 988, 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'id': 989, 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 'soybean_milk', 'soymilk'], 'id': 990, 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'id': 991, 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'id': 992, 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'id': 993, 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'id': 994, 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 'eyeglasses', 'glasses'], 'id': 995, 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'id': 996, 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'c', 'synset': 'spider.n.01', 'synonyms': ['spider'], 'id': 997, 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'r', 'synset': 'spiny_lobster.n.02', 'synonyms': ['crawfish', 'crayfish'], 'id': 998, 'def': 'large edible marine crustacean having a spiny carapace but lacking the large pincers of true lobsters', 'name': 'crawfish'}, {'frequency': 'c', 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'id': 999, 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'id': 1000, 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'id': 1001, 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'id': 1002, 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'synset': 'squid.n.01', 'synonyms': ['squid_(food)', 'calamari', 'calamary'], 'id': 1003, 'def': '(Italian cuisine) squid prepared as food', 'name': 'squid_(food)'}, {'frequency': 'c', 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'id': 1004, 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'r', 'synset': 'stagecoach.n.01', 'synonyms': ['stagecoach'], 'id': 1005, 'def': 'a large coach-and-four formerly used to carry passengers and mail on regular routes between towns', 'name': 'stagecoach'}, {'frequency': 'c', 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'id': 1006, 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'c', 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'id': 1007, 'def': 'echinoderms characterized by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'id': 1008, 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'id': 1009, 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'id': 1010, 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'f', 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'id': 1011, 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'id': 1012, 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'id': 1013, 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'id': 1014, 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'synset': 'stew.n.02', 'synonyms': ['stew'], 'id': 1015, 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'id': 1016, 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'id': 1017, 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'f', 'synset': 'stool.n.01', 'synonyms': ['stool'], 'id': 1018, 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'id': 1019, 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'id': 1020, 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'id': 1021, 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'id': 1022, 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'synset': 'strap.n.01', 'synonyms': ['strap'], 'id': 1023, 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'id': 1024, 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'id': 1025, 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'id': 1026, 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'id': 1027, 'def': 'a lamp supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'id': 1028, 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'id': 1029, 'def': 'a pointed tool for writing or drawing or engraving, including pens', 'name': 'stylus'}, {'frequency': 'r', 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'id': 1030, 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'id': 1031, 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'id': 1032, 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'f', 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'id': 1033, 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'id': 1034, 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'id': 1035, 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'id': 1036, 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'f', 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'id': 1037, 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'id': 1038, 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'synset': 'swab.n.02', 'synonyms': ['mop'], 'id': 1039, 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'id': 1040, 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'id': 1041, 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'id': 1042, 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'id': 1043, 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'id': 1044, 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'id': 1045, 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 'c', 'synset': 'sword.n.01', 'synonyms': ['sword'], 'id': 1046, 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'id': 1047, 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'id': 1048, 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'id': 1049, 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'synset': 'table.n.02', 'synonyms': ['table'], 'id': 1050, 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'id': 1051, 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'id': 1052, 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'id': 1053, 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'synset': 'taco.n.02', 'synonyms': ['taco'], 'id': 1054, 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'synset': 'tag.n.02', 'synonyms': ['tag'], 'id': 1055, 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'id': 1056, 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'id': 1057, 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'id': 1058, 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'f', 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'id': 1059, 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'id': 1060, 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'f', 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'id': 1061, 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'id': 1062, 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'id': 1063, 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 'id': 1064, 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'id': 1065, 'def': 'a cloth having a crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'id': 1066, 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'c', 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'id': 1067, 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'id': 1068, 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'id': 1069, 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'f', 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'id': 1070, 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'id': 1071, 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'id': 1072, 'def': 'electronic device for communicating by voice over long distances (includes wired and wireless/cell phones)', 'name': 'telephone'}, {'frequency': 'c', 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'id': 1073, 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'id': 1074, 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'id': 1075, 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'id': 1076, 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'id': 1077, 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'id': 1078, 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'id': 1079, 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'id': 1080, 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'id': 1081, 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'id': 1082, 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'f', 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'id': 1083, 'def': 'a regulator for automatically regulating temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'id': 1084, 'def': 'a small metal cap to protect the finger while sewing; can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'id': 1085, 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'id': 1086, 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'id': 1087, 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'id': 1088, 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'id': 1089, 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'id': 1090, 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'id': 1091, 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'c', 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'id': 1092, 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'id': 1093, 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'id': 1094, 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'id': 1095, 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'f', 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'id': 1096, 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'id': 1097, 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'id': 1098, 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'id': 1099, 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'f', 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'id': 1100, 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'id': 1101, 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'id': 1102, 'def': 'small brush; has long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'id': 1103, 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, {'frequency': 'f', 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'id': 1104, 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'f', 'synset': 'top.n.09', 'synonyms': ['cover'], 'id': 1105, 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'id': 1106, 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'id': 1107, 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'synset': 'towel.n.01', 'synonyms': ['towel'], 'id': 1108, 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'id': 1109, 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'synset': 'toy.n.03', 'synonyms': ['toy'], 'id': 1110, 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'id': 1111, 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'id': 1112, 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'c', 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'id': 1113, 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'f', 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'id': 1114, 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'id': 1115, 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'id': 1116, 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'synset': 'tray.n.01', 'synonyms': ['tray'], 'id': 1117, 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'id': 1118, 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'id': 1119, 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'c', 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'id': 1120, 'def': 'a vehicle with three wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'f', 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'id': 1121, 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'id': 1122, 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'synset': 'truck.n.01', 'synonyms': ['truck'], 'id': 1123, 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'id': 1124, 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'id': 1125, 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'synset': 'tub.n.02', 'synonyms': ['vat'], 'id': 1126, 'def': 'a large vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'synset': 'turban.n.01', 'synonyms': ['turban'], 'id': 1127, 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'c', 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'id': 1128, 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'id': 1129, 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'id': 1130, 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'c', 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'id': 1131, 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'c', 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'id': 1132, 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'id': 1133, 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'f', 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'id': 1134, 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'id': 1135, 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'f', 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'id': 1136, 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'c', 'synset': 'urn.n.01', 'synonyms': ['urn'], 'id': 1137, 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'id': 1138, 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'f', 'synset': 'vase.n.01', 'synonyms': ['vase'], 'id': 1139, 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'synset': 'vending_machine.n.01', 'synonyms': ['vending_machine'], 'id': 1140, 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'id': 1141, 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'f', 'synset': 'vest.n.01', 'synonyms': ['vest', 'waistcoat'], 'id': 1142, 'def': "a man's sleeveless garment worn underneath a coat", 'name': 'vest'}, {'frequency': 'c', 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'id': 1143, 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'id': 1144, 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'id': 1145, 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'id': 1146, 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'c', 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'id': 1147, 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'id': 1148, 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'id': 1149, 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'id': 1150, 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'id': 1151, 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'id': 1152, 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'id': 1153, 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'id': 1154, 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'id': 1155, 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'f', 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'id': 1156, 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'id': 1157, 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'id': 1158, 'def': 'a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'synset': 'washbasin.n.01', 'synonyms': ['washbasin', 'basin_(for_washing)', 'washbowl', 'washstand', 'handbasin'], 'id': 1159, 'def': 'a bathroom sink that is permanently installed and connected to a water supply and drainpipe; where you can wash your hands and face', 'name': 'washbasin'}, {'frequency': 'c', 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'id': 1160, 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'id': 1161, 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'id': 1162, 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'id': 1163, 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'id': 1164, 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'id': 1165, 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'c', 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'id': 1166, 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'id': 1167, 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'id': 1168, 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'id': 1169, 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'id': 1170, 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'id': 1171, 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'f', 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'id': 1172, 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'id': 1173, 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'id': 1174, 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'id': 1175, 'def': 'a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'id': 1176, 'def': 'a ring given to the bride and/or groom at the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'id': 1177, 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'id': 1178, 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'id': 1179, 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'id': 1180, 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'c', 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'id': 1181, 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'c', 'synset': 'wig.n.01', 'synonyms': ['wig'], 'id': 1182, 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'id': 1183, 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'id': 1184, 'def': 'A mill or turbine that is powered by wind', 'name': 'windmill'}, {'frequency': 'c', 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'id': 1185, 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'id': 1186, 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'id': 1187, 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'id': 1188, 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'c', 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'id': 1189, 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'id': 1190, 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'f', 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 'id': 1191, 'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'synset': 'wok.n.01', 'synonyms': ['wok'], 'id': 1192, 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'id': 1193, 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'id': 1194, 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'id': 1195, 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 'name': 'wreath'}, {'frequency': 'c', 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'id': 1196, 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'f', 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'id': 1197, 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'id': 1198, 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'c', 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'id': 1199, 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'c', 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'id': 1200, 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'c', 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'id': 1201, 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'id': 1202, 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'id': 1203, 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa
+# fmt: on
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/pascal_voc.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/pascal_voc.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbbf82cb96442bfa0cf05ed0f4dddf3645434b7e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/pascal_voc.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import numpy as np
+import os
+import xml.etree.ElementTree as ET
+from typing import List, Tuple, Union
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.structures import BoxMode
+from detectron2.utils.file_io import PathManager
+
+__all__ = ["load_voc_instances", "register_pascal_voc"]
+
+
+# fmt: off
+CLASS_NAMES = (
+ "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
+ "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
+ "pottedplant", "sheep", "sofa", "train", "tvmonitor"
+)
+# fmt: on
+
+
+def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
+ """
+ Load Pascal VOC detection annotations to Detectron2 format.
+
+ Args:
+ dirname: Contain "Annotations", "ImageSets", "JPEGImages"
+ split (str): one of "train", "test", "val", "trainval"
+ class_names: list or tuple of class names
+ """
+ with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
+ fileids = np.loadtxt(f, dtype=np.str)
+
+ # Needs to read many small annotation files. Makes sense at local
+ annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
+ dicts = []
+ for fileid in fileids:
+ anno_file = os.path.join(annotation_dirname, fileid + ".xml")
+ jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
+
+ with PathManager.open(anno_file) as f:
+ tree = ET.parse(f)
+
+ r = {
+ "file_name": jpeg_file,
+ "image_id": fileid,
+ "height": int(tree.findall("./size/height")[0].text),
+ "width": int(tree.findall("./size/width")[0].text),
+ }
+ instances = []
+
+ for obj in tree.findall("object"):
+ cls = obj.find("name").text
+ # We include "difficult" samples in training.
+ # Based on limited experiments, they don't hurt accuracy.
+ # difficult = int(obj.find("difficult").text)
+ # if difficult == 1:
+ # continue
+ bbox = obj.find("bndbox")
+ bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
+ # Original annotations are integers in the range [1, W or H]
+ # Assuming they mean 1-based pixel indices (inclusive),
+ # a box with annotation (xmin=1, xmax=W) covers the whole image.
+ # In coordinate space this is represented by (xmin=0, xmax=W)
+ bbox[0] -= 1.0
+ bbox[1] -= 1.0
+ instances.append(
+ {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
+ )
+ r["annotations"] = instances
+ dicts.append(r)
+ return dicts
+
+
+def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
+ DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
+ MetadataCatalog.get(name).set(
+ thing_classes=list(class_names), dirname=dirname, year=year, split=split
+ )
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/register_coco.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/register_coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..e564438d5bf016bcdbb65b4bbdc215d79f579f8a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/register_coco.py
@@ -0,0 +1,3 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .coco import register_coco_instances # noqa
+from .coco_panoptic import register_coco_panoptic_separated # noqa
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/detection_utils.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/detection_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2707eb430f4474c4a8a8968e5bf4caf2124d9f36
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/detection_utils.py
@@ -0,0 +1,623 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+Common data processing utilities that are used in a
+typical object detection data pipeline.
+"""
+import logging
+import numpy as np
+from typing import List, Union
+import pycocotools.mask as mask_util
+import torch
+from PIL import Image
+
+from detectron2.structures import (
+ BitMasks,
+ Boxes,
+ BoxMode,
+ Instances,
+ Keypoints,
+ PolygonMasks,
+ RotatedBoxes,
+ polygons_to_bitmask,
+)
+from detectron2.utils.file_io import PathManager
+
+from . import transforms as T
+from .catalog import MetadataCatalog
+
+__all__ = [
+ "SizeMismatchError",
+ "convert_image_to_rgb",
+ "check_image_size",
+ "transform_proposals",
+ "transform_instance_annotations",
+ "annotations_to_instances",
+ "annotations_to_instances_rotated",
+ "build_augmentation",
+ "build_transform_gen",
+ "create_keypoint_hflip_indices",
+ "filter_empty_instances",
+ "read_image",
+]
+
+
+class SizeMismatchError(ValueError):
+ """
+ When loaded image has difference width/height compared with annotation.
+ """
+
+
+# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
+_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
+_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
+
+# https://www.exiv2.org/tags.html
+_EXIF_ORIENT = 274 # exif 'Orientation' tag
+
+
+def convert_PIL_to_numpy(image, format):
+ """
+ Convert PIL image to numpy array of target format.
+
+ Args:
+ image (PIL.Image): a PIL image
+ format (str): the format of output image
+
+ Returns:
+ (np.ndarray): also see `read_image`
+ """
+ if format is not None:
+ # PIL only supports RGB, so convert to RGB and flip channels over below
+ conversion_format = format
+ if format in ["BGR", "YUV-BT.601"]:
+ conversion_format = "RGB"
+ image = image.convert(conversion_format)
+ image = np.asarray(image)
+ # PIL squeezes out the channel dimension for "L", so make it HWC
+ if format == "L":
+ image = np.expand_dims(image, -1)
+
+ # handle formats not supported by PIL
+ elif format == "BGR":
+ # flip channels if needed
+ image = image[:, :, ::-1]
+ elif format == "YUV-BT.601":
+ image = image / 255.0
+ image = np.dot(image, np.array(_M_RGB2YUV).T)
+
+ return image
+
+
+def convert_image_to_rgb(image, format):
+ """
+ Convert an image from given format to RGB.
+
+ Args:
+ image (np.ndarray or Tensor): an HWC image
+ format (str): the format of input image, also see `read_image`
+
+ Returns:
+ (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
+ """
+ if isinstance(image, torch.Tensor):
+ image = image.cpu().numpy()
+ if format == "BGR":
+ image = image[:, :, [2, 1, 0]]
+ elif format == "YUV-BT.601":
+ image = np.dot(image, np.array(_M_YUV2RGB).T)
+ image = image * 255.0
+ else:
+ if format == "L":
+ image = image[:, :, 0]
+ image = image.astype(np.uint8)
+ image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
+ return image
+
+
+def _apply_exif_orientation(image):
+ """
+ Applies the exif orientation correctly.
+
+ This code exists per the bug:
+ https://github.com/python-pillow/Pillow/issues/3973
+ with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
+ various methods, especially `tobytes`
+
+ Function based on:
+ https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
+ https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
+
+ Args:
+ image (PIL.Image): a PIL image
+
+ Returns:
+ (PIL.Image): the PIL image with exif orientation applied, if applicable
+ """
+ if not hasattr(image, "getexif"):
+ return image
+
+ try:
+ exif = image.getexif()
+ except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
+ exif = None
+
+ if exif is None:
+ return image
+
+ orientation = exif.get(_EXIF_ORIENT)
+
+ method = {
+ 2: Image.FLIP_LEFT_RIGHT,
+ 3: Image.ROTATE_180,
+ 4: Image.FLIP_TOP_BOTTOM,
+ 5: Image.TRANSPOSE,
+ 6: Image.ROTATE_270,
+ 7: Image.TRANSVERSE,
+ 8: Image.ROTATE_90,
+ }.get(orientation)
+
+ if method is not None:
+ return image.transpose(method)
+ return image
+
+
+def read_image(file_name, format=None):
+ """
+ Read an image into the given format.
+ Will apply rotation and flipping if the image has such exif information.
+
+ Args:
+ file_name (str): image file path
+ format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
+
+ Returns:
+ image (np.ndarray):
+ an HWC image in the given format, which is 0-255, uint8 for
+ supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
+ """
+ with PathManager.open(file_name, "rb") as f:
+ image = Image.open(f)
+
+ # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
+ image = _apply_exif_orientation(image)
+ return convert_PIL_to_numpy(image, format)
+
+
+def check_image_size(dataset_dict, image):
+ """
+ Raise an error if the image does not match the size specified in the dict.
+ """
+ if "width" in dataset_dict or "height" in dataset_dict:
+ image_wh = (image.shape[1], image.shape[0])
+ expected_wh = (dataset_dict["width"], dataset_dict["height"])
+ if not image_wh == expected_wh:
+ raise SizeMismatchError(
+ "Mismatched image shape{}, got {}, expect {}.".format(
+ " for image " + dataset_dict["file_name"]
+ if "file_name" in dataset_dict
+ else "",
+ image_wh,
+ expected_wh,
+ )
+ + " Please check the width/height in your annotation."
+ )
+
+ # To ensure bbox always remap to original image size
+ if "width" not in dataset_dict:
+ dataset_dict["width"] = image.shape[1]
+ if "height" not in dataset_dict:
+ dataset_dict["height"] = image.shape[0]
+
+
+def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
+ """
+ Apply transformations to the proposals in dataset_dict, if any.
+
+ Args:
+ dataset_dict (dict): a dict read from the dataset, possibly
+ contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
+ image_shape (tuple): height, width
+ transforms (TransformList):
+ proposal_topk (int): only keep top-K scoring proposals
+ min_box_size (int): proposals with either side smaller than this
+ threshold are removed
+
+ The input dict is modified in-place, with abovementioned keys removed. A new
+ key "proposals" will be added. Its value is an `Instances`
+ object which contains the transformed proposals in its field
+ "proposal_boxes" and "objectness_logits".
+ """
+ if "proposal_boxes" in dataset_dict:
+ # Transform proposal boxes
+ boxes = transforms.apply_box(
+ BoxMode.convert(
+ dataset_dict.pop("proposal_boxes"),
+ dataset_dict.pop("proposal_bbox_mode"),
+ BoxMode.XYXY_ABS,
+ )
+ )
+ boxes = Boxes(boxes)
+ objectness_logits = torch.as_tensor(
+ dataset_dict.pop("proposal_objectness_logits").astype("float32")
+ )
+
+ boxes.clip(image_shape)
+ keep = boxes.nonempty(threshold=min_box_size)
+ boxes = boxes[keep]
+ objectness_logits = objectness_logits[keep]
+
+ proposals = Instances(image_shape)
+ proposals.proposal_boxes = boxes[:proposal_topk]
+ proposals.objectness_logits = objectness_logits[:proposal_topk]
+ dataset_dict["proposals"] = proposals
+
+
+def transform_instance_annotations(
+ annotation, transforms, image_size, *, keypoint_hflip_indices=None
+):
+ """
+ Apply transforms to box, segmentation and keypoints annotations of a single instance.
+
+ It will use `transforms.apply_box` for the box, and
+ `transforms.apply_coords` for segmentation polygons & keypoints.
+ If you need anything more specially designed for each data structure,
+ you'll need to implement your own version of this function or the transforms.
+
+ Args:
+ annotation (dict): dict of instance annotations for a single instance.
+ It will be modified in-place.
+ transforms (TransformList or list[Transform]):
+ image_size (tuple): the height, width of the transformed image
+ keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
+
+ Returns:
+ dict:
+ the same input dict with fields "bbox", "segmentation", "keypoints"
+ transformed according to `transforms`.
+ The "bbox_mode" field will be set to XYXY_ABS.
+ """
+ if isinstance(transforms, (tuple, list)):
+ transforms = T.TransformList(transforms)
+ # bbox is 1d (per-instance bounding box)
+ bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
+ # clip transformed bbox to image size
+ bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
+ annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
+ annotation["bbox_mode"] = BoxMode.XYXY_ABS
+
+ if "segmentation" in annotation:
+ # each instance contains 1 or more polygons
+ segm = annotation["segmentation"]
+ if isinstance(segm, list):
+ # polygons
+ polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
+ annotation["segmentation"] = [
+ p.reshape(-1) for p in transforms.apply_polygons(polygons)
+ ]
+ elif isinstance(segm, dict):
+ # RLE
+ mask = mask_util.decode(segm)
+ mask = transforms.apply_segmentation(mask)
+ assert tuple(mask.shape[:2]) == image_size
+ annotation["segmentation"] = mask
+ else:
+ raise ValueError(
+ "Cannot transform segmentation of type '{}'!"
+ "Supported types are: polygons as list[list[float] or ndarray],"
+ " COCO-style RLE as a dict.".format(type(segm))
+ )
+
+ if "keypoints" in annotation:
+ keypoints = transform_keypoint_annotations(
+ annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
+ )
+ annotation["keypoints"] = keypoints
+
+ return annotation
+
+
+def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
+ """
+ Transform keypoint annotations of an image.
+ If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)
+
+ Args:
+ keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
+ Each point is represented by (x, y, visibility).
+ transforms (TransformList):
+ image_size (tuple): the height, width of the transformed image
+ keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
+ When `transforms` includes horizontal flip, will use the index
+ mapping to flip keypoints.
+ """
+ # (N*3,) -> (N, 3)
+ keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
+ keypoints_xy = transforms.apply_coords(keypoints[:, :2])
+
+ # Set all out-of-boundary points to "unlabeled"
+ inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
+ inside = inside.all(axis=1)
+ keypoints[:, :2] = keypoints_xy
+ keypoints[:, 2][~inside] = 0
+
+ # This assumes that HorizFlipTransform is the only one that does flip
+ do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
+
+ # Alternative way: check if probe points was horizontally flipped.
+ # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
+ # probe_aug = transforms.apply_coords(probe.copy())
+ # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa
+
+ # If flipped, swap each keypoint with its opposite-handed equivalent
+ if do_hflip:
+ if keypoint_hflip_indices is None:
+ raise ValueError("Cannot flip keypoints without providing flip indices!")
+ if len(keypoints) != len(keypoint_hflip_indices):
+ raise ValueError(
+ "Keypoint data has {} points, but metadata "
+ "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
+ )
+ keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]
+
+ # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
+ keypoints[keypoints[:, 2] == 0] = 0
+ return keypoints
+
+
+def annotations_to_instances(annos, image_size, mask_format="polygon"):
+ """
+ Create an :class:`Instances` object used by the models,
+ from instance annotations in the dataset dict.
+
+ Args:
+ annos (list[dict]): a list of instance annotations in one image, each
+ element for one instance.
+ image_size (tuple): height, width
+
+ Returns:
+ Instances:
+ It will contain fields "gt_boxes", "gt_classes",
+ "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
+ This is the format that builtin models expect.
+ """
+ boxes = (
+ np.stack(
+ [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
+ )
+ if len(annos)
+ else np.zeros((0, 4))
+ )
+ target = Instances(image_size)
+ target.gt_boxes = Boxes(boxes)
+
+ classes = [int(obj["category_id"]) for obj in annos]
+ classes = torch.tensor(classes, dtype=torch.int64)
+ target.gt_classes = classes
+
+ if len(annos) and "segmentation" in annos[0]:
+ segms = [obj["segmentation"] for obj in annos]
+ if mask_format == "polygon":
+ try:
+ masks = PolygonMasks(segms)
+ except ValueError as e:
+ raise ValueError(
+ "Failed to use mask_format=='polygon' from the given annotations!"
+ ) from e
+ else:
+ assert mask_format == "bitmask", mask_format
+ masks = []
+ for segm in segms:
+ if isinstance(segm, list):
+ # polygon
+ masks.append(polygons_to_bitmask(segm, *image_size))
+ elif isinstance(segm, dict):
+ # COCO RLE
+ masks.append(mask_util.decode(segm))
+ elif isinstance(segm, np.ndarray):
+ assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
+ segm.ndim
+ )
+ # mask array
+ masks.append(segm)
+ else:
+ raise ValueError(
+ "Cannot convert segmentation of type '{}' to BitMasks!"
+ "Supported types are: polygons as list[list[float] or ndarray],"
+ " COCO-style RLE as a dict, or a binary segmentation mask "
+ " in a 2D numpy array of shape HxW.".format(type(segm))
+ )
+ # torch.from_numpy does not support array with negative stride.
+ masks = BitMasks(
+ torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
+ )
+ target.gt_masks = masks
+
+ if len(annos) and "keypoints" in annos[0]:
+ kpts = [obj.get("keypoints", []) for obj in annos]
+ target.gt_keypoints = Keypoints(kpts)
+
+ return target
+
+
+def annotations_to_instances_rotated(annos, image_size):
+ """
+ Create an :class:`Instances` object used by the models,
+ from instance annotations in the dataset dict.
+ Compared to `annotations_to_instances`, this function is for rotated boxes only
+
+ Args:
+ annos (list[dict]): a list of instance annotations in one image, each
+ element for one instance.
+ image_size (tuple): height, width
+
+ Returns:
+ Instances:
+ Containing fields "gt_boxes", "gt_classes",
+ if they can be obtained from `annos`.
+ This is the format that builtin models expect.
+ """
+ boxes = [obj["bbox"] for obj in annos]
+ target = Instances(image_size)
+ boxes = target.gt_boxes = RotatedBoxes(boxes)
+ boxes.clip(image_size)
+
+ classes = [obj["category_id"] for obj in annos]
+ classes = torch.tensor(classes, dtype=torch.int64)
+ target.gt_classes = classes
+
+ return target
+
+
+def filter_empty_instances(
+ instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
+):
+ """
+ Filter out empty instances in an `Instances` object.
+
+ Args:
+ instances (Instances):
+ by_box (bool): whether to filter out instances with empty boxes
+ by_mask (bool): whether to filter out instances with empty masks
+ box_threshold (float): minimum width and height to be considered non-empty
+ return_mask (bool): whether to return boolean mask of filtered instances
+
+ Returns:
+ Instances: the filtered instances.
+ tensor[bool], optional: boolean mask of filtered instances
+ """
+ assert by_box or by_mask
+ r = []
+ if by_box:
+ r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
+ if instances.has("gt_masks") and by_mask:
+ r.append(instances.gt_masks.nonempty())
+
+ # TODO: can also filter visible keypoints
+
+ if not r:
+ return instances
+ m = r[0]
+ for x in r[1:]:
+ m = m & x
+ if return_mask:
+ return instances[m], m
+ return instances[m]
+
+
+def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
+ """
+ Args:
+ dataset_names: list of dataset names
+
+ Returns:
+ list[int]: a list of size=#keypoints, storing the
+ horizontally-flipped keypoint indices.
+ """
+ if isinstance(dataset_names, str):
+ dataset_names = [dataset_names]
+
+ check_metadata_consistency("keypoint_names", dataset_names)
+ check_metadata_consistency("keypoint_flip_map", dataset_names)
+
+ meta = MetadataCatalog.get(dataset_names[0])
+ names = meta.keypoint_names
+ # TODO flip -> hflip
+ flip_map = dict(meta.keypoint_flip_map)
+ flip_map.update({v: k for k, v in flip_map.items()})
+ flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
+ flip_indices = [names.index(i) for i in flipped_names]
+ return flip_indices
+
+
+def gen_crop_transform_with_instance(crop_size, image_size, instance):
+ """
+ Generate a CropTransform so that the cropping region contains
+ the center of the given instance.
+
+ Args:
+ crop_size (tuple): h, w in pixels
+ image_size (tuple): h, w
+ instance (dict): an annotation dict of one instance, in Detectron2's
+ dataset format.
+ """
+ crop_size = np.asarray(crop_size, dtype=np.int32)
+ bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
+ center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
+ assert (
+ image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
+ ), "The annotation bounding box is outside of the image!"
+ assert (
+ image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
+ ), "Crop size is larger than image size!"
+
+ min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
+ max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
+ max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
+
+ y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
+ x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
+ return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
+
+
+def check_metadata_consistency(key, dataset_names):
+ """
+ Check that the datasets have consistent metadata.
+
+ Args:
+ key (str): a metadata key
+ dataset_names (list[str]): a list of dataset names
+
+ Raises:
+ AttributeError: if the key does not exist in the metadata
+ ValueError: if the given datasets do not have the same metadata values defined by key
+ """
+ if len(dataset_names) == 0:
+ return
+ logger = logging.getLogger(__name__)
+ entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
+ for idx, entry in enumerate(entries_per_dataset):
+ if entry != entries_per_dataset[0]:
+ logger.error(
+ "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
+ )
+ logger.error(
+ "Metadata '{}' for dataset '{}' is '{}'".format(
+ key, dataset_names[0], str(entries_per_dataset[0])
+ )
+ )
+ raise ValueError("Datasets have different metadata '{}'!".format(key))
+
+
+def build_augmentation(cfg, is_train):
+ """
+ Create a list of default :class:`Augmentation` from config.
+ Now it includes resizing and flipping.
+
+ Returns:
+ list[Augmentation]
+ """
+ if is_train:
+ min_size = cfg.INPUT.MIN_SIZE_TRAIN
+ max_size = cfg.INPUT.MAX_SIZE_TRAIN
+ sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
+ else:
+ min_size = cfg.INPUT.MIN_SIZE_TEST
+ max_size = cfg.INPUT.MAX_SIZE_TEST
+ sample_style = "choice"
+ augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
+ if is_train and cfg.INPUT.RANDOM_FLIP != "none":
+ augmentation.append(
+ T.RandomFlip(
+ horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
+ vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
+ )
+ )
+ return augmentation
+
+
+build_transform_gen = build_augmentation
+"""
+Alias for backward-compatibility.
+"""
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..85c9f1a9df8a4038fbd4246239b699402e382309
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .distributed_sampler import (
+ InferenceSampler,
+ RandomSubsetTrainingSampler,
+ RepeatFactorTrainingSampler,
+ TrainingSampler,
+)
+
+from .grouped_batch_sampler import GroupedBatchSampler
+
+__all__ = [
+ "GroupedBatchSampler",
+ "TrainingSampler",
+ "RandomSubsetTrainingSampler",
+ "InferenceSampler",
+ "RepeatFactorTrainingSampler",
+]
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/distributed_sampler.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/distributed_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..a098e6ac07c1b193fddcb69e6e54aced82e6081c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/distributed_sampler.py
@@ -0,0 +1,278 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import itertools
+import logging
+import math
+from collections import defaultdict
+from typing import Optional
+import torch
+from torch.utils.data.sampler import Sampler
+
+from detectron2.utils import comm
+
+logger = logging.getLogger(__name__)
+
+
+class TrainingSampler(Sampler):
+ """
+ In training, we only care about the "infinite stream" of training data.
+ So this sampler produces an infinite stream of indices and
+ all workers cooperate to correctly shuffle the indices and sample different indices.
+
+ The samplers in each worker effectively produces `indices[worker_id::num_workers]`
+ where `indices` is an infinite stream of indices consisting of
+ `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
+ or `range(size) + range(size) + ...` (if shuffle is False)
+
+ Note that this sampler does not shard based on pytorch DataLoader worker id.
+ A sampler passed to pytorch DataLoader is used only with map-style dataset
+ and will not be executed inside workers.
+ But if this sampler is used in a way that it gets execute inside a dataloader
+ worker, then extra work needs to be done to shard its outputs based on worker id.
+ This is required so that workers don't produce identical data.
+ :class:`ToIterableDataset` implements this logic.
+ This note is true for all samplers in detectron2.
+ """
+
+ def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):
+ """
+ Args:
+ size (int): the total number of data of the underlying dataset to sample from
+ shuffle (bool): whether to shuffle the indices or not
+ seed (int): the initial seed of the shuffle. Must be the same
+ across all workers. If None, will use a random seed shared
+ among workers (require synchronization among all workers).
+ """
+ if not isinstance(size, int):
+ raise TypeError(f"TrainingSampler(size=) expects an int. Got type {type(size)}.")
+ if size <= 0:
+ raise ValueError(f"TrainingSampler(size=) expects a positive int. Got {size}.")
+ self._size = size
+ self._shuffle = shuffle
+ if seed is None:
+ seed = comm.shared_random_seed()
+ self._seed = int(seed)
+
+ self._rank = comm.get_rank()
+ self._world_size = comm.get_world_size()
+
+ def __iter__(self):
+ start = self._rank
+ yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
+
+ def _infinite_indices(self):
+ g = torch.Generator()
+ g.manual_seed(self._seed)
+ while True:
+ if self._shuffle:
+ yield from torch.randperm(self._size, generator=g).tolist()
+ else:
+ yield from torch.arange(self._size).tolist()
+
+
+class RandomSubsetTrainingSampler(TrainingSampler):
+ """
+ Similar to TrainingSampler, but only sample a random subset of indices.
+ This is useful when you want to estimate the accuracy vs data-number curves by
+ training the model with different subset_ratio.
+ """
+
+ def __init__(
+ self,
+ size: int,
+ subset_ratio: float,
+ shuffle: bool = True,
+ seed_shuffle: Optional[int] = None,
+ seed_subset: Optional[int] = None,
+ ):
+ """
+ Args:
+ size (int): the total number of data of the underlying dataset to sample from
+ subset_ratio (float): the ratio of subset data to sample from the underlying dataset
+ shuffle (bool): whether to shuffle the indices or not
+ seed_shuffle (int): the initial seed of the shuffle. Must be the same
+ across all workers. If None, will use a random seed shared
+ among workers (require synchronization among all workers).
+ seed_subset (int): the seed to randomize the subset to be sampled.
+ Must be the same across all workers. If None, will use a random seed shared
+ among workers (require synchronization among all workers).
+ """
+ super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)
+
+ assert 0.0 < subset_ratio <= 1.0
+ self._size_subset = int(size * subset_ratio)
+ assert self._size_subset > 0
+ if seed_subset is None:
+ seed_subset = comm.shared_random_seed()
+ self._seed_subset = int(seed_subset)
+
+ # randomly generate the subset indexes to be sampled from
+ g = torch.Generator()
+ g.manual_seed(self._seed_subset)
+ indexes_randperm = torch.randperm(self._size, generator=g)
+ self._indexes_subset = indexes_randperm[: self._size_subset]
+
+ logger.info("Using RandomSubsetTrainingSampler......")
+ logger.info(f"Randomly sample {self._size_subset} data from the original {self._size} data")
+
+ def _infinite_indices(self):
+ g = torch.Generator()
+ g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__()
+ while True:
+ if self._shuffle:
+ # generate a random permutation to shuffle self._indexes_subset
+ randperm = torch.randperm(self._size_subset, generator=g)
+ yield from self._indexes_subset[randperm].tolist()
+ else:
+ yield from self._indexes_subset.tolist()
+
+
+class RepeatFactorTrainingSampler(Sampler):
+ """
+ Similar to TrainingSampler, but a sample may appear more times than others based
+ on its "repeat factor". This is suitable for training on class imbalanced datasets like LVIS.
+ """
+
+ def __init__(self, repeat_factors, *, shuffle=True, seed=None):
+ """
+ Args:
+ repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's
+ full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.
+ shuffle (bool): whether to shuffle the indices or not
+ seed (int): the initial seed of the shuffle. Must be the same
+ across all workers. If None, will use a random seed shared
+ among workers (require synchronization among all workers).
+ """
+ self._shuffle = shuffle
+ if seed is None:
+ seed = comm.shared_random_seed()
+ self._seed = int(seed)
+
+ self._rank = comm.get_rank()
+ self._world_size = comm.get_world_size()
+
+ # Split into whole number (_int_part) and fractional (_frac_part) parts.
+ self._int_part = torch.trunc(repeat_factors)
+ self._frac_part = repeat_factors - self._int_part
+
+ @staticmethod
+ def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
+ """
+ Compute (fractional) per-image repeat factors based on category frequency.
+ The repeat factor for an image is a function of the frequency of the rarest
+ category labeled in that image. The "frequency of category c" in [0, 1] is defined
+ as the fraction of images in the training set (without repeats) in which category c
+ appears.
+ See :paper:`lvis` (>= v2) Appendix B.2.
+
+ Args:
+ dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
+ repeat_thresh (float): frequency threshold below which data is repeated.
+ If the frequency is half of `repeat_thresh`, the image will be
+ repeated twice.
+
+ Returns:
+ torch.Tensor:
+ the i-th element is the repeat factor for the dataset image at index i.
+ """
+ # 1. For each category c, compute the fraction of images that contain it: f(c)
+ category_freq = defaultdict(int)
+ for dataset_dict in dataset_dicts: # For each image (without repeats)
+ cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
+ for cat_id in cat_ids:
+ category_freq[cat_id] += 1
+ num_images = len(dataset_dicts)
+ for k, v in category_freq.items():
+ category_freq[k] = v / num_images
+
+ # 2. For each category c, compute the category-level repeat factor:
+ # r(c) = max(1, sqrt(t / f(c)))
+ category_rep = {
+ cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
+ for cat_id, cat_freq in category_freq.items()
+ }
+
+ # 3. For each image I, compute the image-level repeat factor:
+ # r(I) = max_{c in I} r(c)
+ rep_factors = []
+ for dataset_dict in dataset_dicts:
+ cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
+ rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
+ rep_factors.append(rep_factor)
+
+ return torch.tensor(rep_factors, dtype=torch.float32)
+
+ def _get_epoch_indices(self, generator):
+ """
+ Create a list of dataset indices (with repeats) to use for one epoch.
+
+ Args:
+ generator (torch.Generator): pseudo random number generator used for
+ stochastic rounding.
+
+ Returns:
+ torch.Tensor: list of dataset indices to use in one epoch. Each index
+ is repeated based on its calculated repeat factor.
+ """
+ # Since repeat factors are fractional, we use stochastic rounding so
+ # that the target repeat factor is achieved in expectation over the
+ # course of training
+ rands = torch.rand(len(self._frac_part), generator=generator)
+ rep_factors = self._int_part + (rands < self._frac_part).float()
+ # Construct a list of indices in which we repeat images as specified
+ indices = []
+ for dataset_index, rep_factor in enumerate(rep_factors):
+ indices.extend([dataset_index] * int(rep_factor.item()))
+ return torch.tensor(indices, dtype=torch.int64)
+
+ def __iter__(self):
+ start = self._rank
+ yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
+
+ def _infinite_indices(self):
+ g = torch.Generator()
+ g.manual_seed(self._seed)
+ while True:
+ # Sample indices with repeats determined by stochastic rounding; each
+ # "epoch" may have a slightly different size due to the rounding.
+ indices = self._get_epoch_indices(g)
+ if self._shuffle:
+ randperm = torch.randperm(len(indices), generator=g)
+ yield from indices[randperm].tolist()
+ else:
+ yield from indices.tolist()
+
+
+class InferenceSampler(Sampler):
+ """
+ Produce indices for inference across all workers.
+ Inference needs to run on the __exact__ set of samples,
+ therefore when the total number of samples is not divisible by the number of workers,
+ this sampler produces different number of samples on different workers.
+ """
+
+ def __init__(self, size: int):
+ """
+ Args:
+ size (int): the total number of data of the underlying dataset to sample from
+ """
+ self._size = size
+ assert size > 0
+ self._rank = comm.get_rank()
+ self._world_size = comm.get_world_size()
+ self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
+
+ @staticmethod
+ def _get_local_indices(total_size, world_size, rank):
+ shard_size = total_size // world_size
+ left = total_size % world_size
+ shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
+
+ begin = sum(shard_sizes[:rank])
+ end = min(sum(shard_sizes[: rank + 1]), total_size)
+ return range(begin, end)
+
+ def __iter__(self):
+ yield from self._local_indices
+
+ def __len__(self):
+ return len(self._local_indices)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/grouped_batch_sampler.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/grouped_batch_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b247730aacd04dd0c752664acde3257c4eddd71
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/samplers/grouped_batch_sampler.py
@@ -0,0 +1,47 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import numpy as np
+from torch.utils.data.sampler import BatchSampler, Sampler
+
+
+class GroupedBatchSampler(BatchSampler):
+ """
+ Wraps another sampler to yield a mini-batch of indices.
+ It enforces that the batch only contain elements from the same group.
+ It also tries to provide mini-batches which follows an ordering which is
+ as close as possible to the ordering from the original sampler.
+ """
+
+ def __init__(self, sampler, group_ids, batch_size):
+ """
+ Args:
+ sampler (Sampler): Base sampler.
+ group_ids (list[int]): If the sampler produces indices in range [0, N),
+ `group_ids` must be a list of `N` ints which contains the group id of each sample.
+ The group ids must be a set of integers in the range [0, num_groups).
+ batch_size (int): Size of mini-batch.
+ """
+ if not isinstance(sampler, Sampler):
+ raise ValueError(
+ "sampler should be an instance of "
+ "torch.utils.data.Sampler, but got sampler={}".format(sampler)
+ )
+ self.sampler = sampler
+ self.group_ids = np.asarray(group_ids)
+ assert self.group_ids.ndim == 1
+ self.batch_size = batch_size
+ groups = np.unique(self.group_ids).tolist()
+
+ # buffer the indices of each group until batch size is reached
+ self.buffer_per_group = {k: [] for k in groups}
+
+ def __iter__(self):
+ for idx in self.sampler:
+ group_id = self.group_ids[idx]
+ group_buffer = self.buffer_per_group[group_id]
+ group_buffer.append(idx)
+ if len(group_buffer) == self.batch_size:
+ yield group_buffer[:] # yield a copy of the list
+ del group_buffer[:]
+
+ def __len__(self):
+ raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.")
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab3c63b5b456a7fb878757e25768a3634f76ae5b
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from fvcore.transforms.transform import Transform, TransformList # order them first
+from fvcore.transforms.transform import *
+from .transform import *
+from .augmentation import *
+from .augmentation_impl import *
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
+
+
+from detectron2.utils.env import fixup_module_metadata
+
+fixup_module_metadata(__name__, globals(), __all__)
+del fixup_module_metadata
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..48be5b1bd66617dfca41f1e915259ffd485bcdd6
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation.py
@@ -0,0 +1,377 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import inspect
+import numpy as np
+import pprint
+from typing import Any, List, Optional, Tuple, Union
+from fvcore.transforms.transform import Transform, TransformList
+
+"""
+See "Data Augmentation" tutorial for an overview of the system:
+https://detectron2.readthedocs.io/tutorials/augmentation.html
+"""
+
+
+__all__ = [
+ "Augmentation",
+ "AugmentationList",
+ "AugInput",
+ "TransformGen",
+ "apply_transform_gens",
+ "StandardAugInput",
+ "apply_augmentations",
+]
+
+
+def _check_img_dtype(img):
+ assert isinstance(img, np.ndarray), "[Augmentation] Needs an numpy array, but got a {}!".format(
+ type(img)
+ )
+ assert not isinstance(img.dtype, np.integer) or (
+ img.dtype == np.uint8
+ ), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format(
+ img.dtype
+ )
+ assert img.ndim in [2, 3], img.ndim
+
+
+def _get_aug_input_args(aug, aug_input) -> List[Any]:
+ """
+ Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
+ """
+ if aug.input_args is None:
+ # Decide what attributes are needed automatically
+ prms = list(inspect.signature(aug.get_transform).parameters.items())
+ # The default behavior is: if there is one parameter, then its "image"
+ # (work automatically for majority of use cases, and also avoid BC breaking),
+ # Otherwise, use the argument names.
+ if len(prms) == 1:
+ names = ("image",)
+ else:
+ names = []
+ for name, prm in prms:
+ if prm.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
+ raise TypeError(
+ f""" \
+The default implementation of `{type(aug)}.__call__` does not allow \
+`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
+If arguments are unknown, reimplement `__call__` instead. \
+"""
+ )
+ names.append(name)
+ aug.input_args = tuple(names)
+
+ args = []
+ for f in aug.input_args:
+ try:
+ args.append(getattr(aug_input, f))
+ except AttributeError as e:
+ raise AttributeError(
+ f"{type(aug)}.get_transform needs input attribute '{f}', "
+ f"but it is not an attribute of {type(aug_input)}!"
+ ) from e
+ return args
+
+
+class Augmentation:
+ """
+ Augmentation defines (often random) policies/strategies to generate :class:`Transform`
+ from data. It is often used for pre-processing of input data.
+
+ A "policy" that generates a :class:`Transform` may, in the most general case,
+ need arbitrary information from input data in order to determine what transforms
+ to apply. Therefore, each :class:`Augmentation` instance defines the arguments
+ needed by its :meth:`get_transform` method. When called with the positional arguments,
+ the :meth:`get_transform` method executes the policy.
+
+ Note that :class:`Augmentation` defines the policies to create a :class:`Transform`,
+ but not how to execute the actual transform operations to those data.
+ Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform.
+
+ The returned `Transform` object is meant to describe deterministic transformation, which means
+ it can be re-applied on associated data, e.g. the geometry of an image and its segmentation
+ masks need to be transformed together.
+ (If such re-application is not needed, then determinism is not a crucial requirement.)
+ """
+
+ input_args: Optional[Tuple[str]] = None
+ """
+ Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``.
+ By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only
+ contain "image". As long as the argument name convention is followed, there is no need for
+ users to touch this attribute.
+ """
+
+ def _init(self, params=None):
+ if params:
+ for k, v in params.items():
+ if k != "self" and not k.startswith("_"):
+ setattr(self, k, v)
+
+ def get_transform(self, *args) -> Transform:
+ """
+ Execute the policy based on input data, and decide what transform to apply to inputs.
+
+ Args:
+ args: Any fixed-length positional arguments. By default, the name of the arguments
+ should exist in the :class:`AugInput` to be used.
+
+ Returns:
+ Transform: Returns the deterministic transform to apply to the input.
+
+ Examples:
+ ::
+ class MyAug:
+ # if a policy needs to know both image and semantic segmentation
+ def get_transform(image, sem_seg) -> T.Transform:
+ pass
+ tfm: Transform = MyAug().get_transform(image, sem_seg)
+ new_image = tfm.apply_image(image)
+
+ Notes:
+ Users can freely use arbitrary new argument names in custom
+ :meth:`get_transform` method, as long as they are available in the
+ input data. In detectron2 we use the following convention:
+
+ * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
+ floating point in range [0, 1] or [0, 255].
+ * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes
+ of N instances. Each is in XYXY format in unit of absolute coordinates.
+ * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel.
+
+ We do not specify convention for other types and do not include builtin
+ :class:`Augmentation` that uses other types in detectron2.
+ """
+ raise NotImplementedError
+
+ def __call__(self, aug_input) -> Transform:
+ """
+ Augment the given `aug_input` **in-place**, and return the transform that's used.
+
+ This method will be called to apply the augmentation. In most augmentation, it
+ is enough to use the default implementation, which calls :meth:`get_transform`
+ using the inputs. But a subclass can overwrite it to have more complicated logic.
+
+ Args:
+ aug_input (AugInput): an object that has attributes needed by this augmentation
+ (defined by ``self.get_transform``). Its ``transform`` method will be called
+ to in-place transform it.
+
+ Returns:
+ Transform: the transform that is applied on the input.
+ """
+ args = _get_aug_input_args(self, aug_input)
+ tfm = self.get_transform(*args)
+ assert isinstance(tfm, (Transform, TransformList)), (
+ f"{type(self)}.get_transform must return an instance of Transform! "
+ f"Got {type(tfm)} instead."
+ )
+ aug_input.transform(tfm)
+ return tfm
+
+ def _rand_range(self, low=1.0, high=None, size=None):
+ """
+ Uniform float random number between low and high.
+ """
+ if high is None:
+ low, high = 0, low
+ if size is None:
+ size = []
+ return np.random.uniform(low, high, size)
+
+ def __repr__(self):
+ """
+ Produce something like:
+ "MyAugmentation(field1={self.field1}, field2={self.field2})"
+ """
+ try:
+ sig = inspect.signature(self.__init__)
+ classname = type(self).__name__
+ argstr = []
+ for name, param in sig.parameters.items():
+ assert (
+ param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
+ ), "The default __repr__ doesn't support *args or **kwargs"
+ assert hasattr(self, name), (
+ "Attribute {} not found! "
+ "Default __repr__ only works if attributes match the constructor.".format(name)
+ )
+ attr = getattr(self, name)
+ default = param.default
+ if default is attr:
+ continue
+ attr_str = pprint.pformat(attr)
+ if "\n" in attr_str:
+ # don't show it if pformat decides to use >1 lines
+ attr_str = "..."
+ argstr.append("{}={}".format(name, attr_str))
+ return "{}({})".format(classname, ", ".join(argstr))
+ except AssertionError:
+ return super().__repr__()
+
+ __str__ = __repr__
+
+
+def _transform_to_aug(tfm_or_aug):
+ """
+ Wrap Transform into Augmentation.
+ Private, used internally to implement augmentations.
+ """
+ assert isinstance(tfm_or_aug, (Transform, Augmentation)), tfm_or_aug
+ if isinstance(tfm_or_aug, Augmentation):
+ return tfm_or_aug
+ else:
+
+ class _TransformToAug(Augmentation):
+ def __init__(self, tfm: Transform):
+ self.tfm = tfm
+
+ def get_transform(self, *args):
+ return self.tfm
+
+ def __repr__(self):
+ return repr(self.tfm)
+
+ __str__ = __repr__
+
+ return _TransformToAug(tfm_or_aug)
+
+
+class AugmentationList(Augmentation):
+ """
+ Apply a sequence of augmentations.
+
+ It has ``__call__`` method to apply the augmentations.
+
+ Note that :meth:`get_transform` method is impossible (will throw error if called)
+ for :class:`AugmentationList`, because in order to apply a sequence of augmentations,
+ the kth augmentation must be applied first, to provide inputs needed by the (k+1)th
+ augmentation.
+ """
+
+ def __init__(self, augs):
+ """
+ Args:
+ augs (list[Augmentation or Transform]):
+ """
+ super().__init__()
+ self.augs = [_transform_to_aug(x) for x in augs]
+
+ def __call__(self, aug_input) -> Transform:
+ tfms = []
+ for x in self.augs:
+ tfm = x(aug_input)
+ tfms.append(tfm)
+ return TransformList(tfms)
+
+ def __repr__(self):
+ msgs = [str(x) for x in self.augs]
+ return "AugmentationList[{}]".format(", ".join(msgs))
+
+ __str__ = __repr__
+
+
+class AugInput:
+ """
+ Input that can be used with :meth:`Augmentation.__call__`.
+ This is a standard implementation for the majority of use cases.
+ This class provides the standard attributes **"image", "boxes", "sem_seg"**
+ defined in :meth:`__init__` and they may be needed by different augmentations.
+ Most augmentation policies do not need attributes beyond these three.
+
+ After applying augmentations to these attributes (using :meth:`AugInput.transform`),
+ the returned transforms can then be used to transform other data structures that users have.
+
+ Examples:
+ ::
+ input = AugInput(image, boxes=boxes)
+ tfms = augmentation(input)
+ transformed_image = input.image
+ transformed_boxes = input.boxes
+ transformed_other_data = tfms.apply_other(other_data)
+
+ An extended project that works with new data types may implement augmentation policies
+ that need other inputs. An algorithm may need to transform inputs in a way different
+ from the standard approach defined in this class. In those rare situations, users can
+ implement a class similar to this class, that satify the following condition:
+
+ * The input must provide access to these data in the form of attribute access
+ (``getattr``). For example, if an :class:`Augmentation` to be applied needs "image"
+ and "sem_seg" arguments, its input must have the attribute "image" and "sem_seg".
+ * The input must have a ``transform(tfm: Transform) -> None`` method which
+ in-place transforms all its attributes.
+ """
+
+ # TODO maybe should support more builtin data types here
+ def __init__(
+ self,
+ image: np.ndarray,
+ *,
+ boxes: Optional[np.ndarray] = None,
+ sem_seg: Optional[np.ndarray] = None,
+ ):
+ """
+ Args:
+ image (ndarray): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
+ floating point in range [0, 1] or [0, 255]. The meaning of C is up
+ to users.
+ boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode
+ sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element
+ is an integer label of pixel.
+ """
+ _check_img_dtype(image)
+ self.image = image
+ self.boxes = boxes
+ self.sem_seg = sem_seg
+
+ def transform(self, tfm: Transform) -> None:
+ """
+ In-place transform all attributes of this class.
+
+ By "in-place", it means after calling this method, accessing an attribute such
+ as ``self.image`` will return transformed data.
+ """
+ self.image = tfm.apply_image(self.image)
+ if self.boxes is not None:
+ self.boxes = tfm.apply_box(self.boxes)
+ if self.sem_seg is not None:
+ self.sem_seg = tfm.apply_segmentation(self.sem_seg)
+
+ def apply_augmentations(
+ self, augmentations: List[Union[Augmentation, Transform]]
+ ) -> TransformList:
+ """
+ Equivalent of ``AugmentationList(augmentations)(self)``
+ """
+ return AugmentationList(augmentations)(self)
+
+
+def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs):
+ """
+ Use ``T.AugmentationList(augmentations)(inputs)`` instead.
+ """
+ if isinstance(inputs, np.ndarray):
+ # handle the common case of image-only Augmentation, also for backward compatibility
+ image_only = True
+ inputs = AugInput(inputs)
+ else:
+ image_only = False
+ tfms = inputs.apply_augmentations(augmentations)
+ return inputs.image if image_only else inputs, tfms
+
+
+apply_transform_gens = apply_augmentations
+"""
+Alias for backward-compatibility.
+"""
+
+TransformGen = Augmentation
+"""
+Alias for Augmentation, since it is something that generates :class:`Transform`s
+"""
+
+StandardAugInput = AugInput
+"""
+Alias for compatibility. It's not worth the complexity to have two classes.
+"""
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation_impl.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..652a34a9aef2d4004f46ad7814befe6d1c230bc4
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/augmentation_impl.py
@@ -0,0 +1,614 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""
+Implement many useful :class:`Augmentation`.
+"""
+import numpy as np
+import sys
+from typing import Tuple
+import torch
+from fvcore.transforms.transform import (
+ BlendTransform,
+ CropTransform,
+ HFlipTransform,
+ NoOpTransform,
+ PadTransform,
+ Transform,
+ TransformList,
+ VFlipTransform,
+)
+from PIL import Image
+
+from .augmentation import Augmentation, _transform_to_aug
+from .transform import ExtentTransform, ResizeTransform, RotationTransform
+
+__all__ = [
+ "FixedSizeCrop",
+ "RandomApply",
+ "RandomBrightness",
+ "RandomContrast",
+ "RandomCrop",
+ "RandomExtent",
+ "RandomFlip",
+ "RandomSaturation",
+ "RandomLighting",
+ "RandomRotation",
+ "Resize",
+ "ResizeScale",
+ "ResizeShortestEdge",
+ "RandomCrop_CategoryAreaConstraint",
+]
+
+
+class RandomApply(Augmentation):
+ """
+ Randomly apply an augmentation with a given probability.
+ """
+
+ def __init__(self, tfm_or_aug, prob=0.5):
+ """
+ Args:
+ tfm_or_aug (Transform, Augmentation): the transform or augmentation
+ to be applied. It can either be a `Transform` or `Augmentation`
+ instance.
+ prob (float): probability between 0.0 and 1.0 that
+ the wrapper transformation is applied
+ """
+ super().__init__()
+ self.aug = _transform_to_aug(tfm_or_aug)
+ assert 0.0 <= prob <= 1.0, f"Probablity must be between 0.0 and 1.0 (given: {prob})"
+ self.prob = prob
+
+ def get_transform(self, *args):
+ do = self._rand_range() < self.prob
+ if do:
+ return self.aug.get_transform(*args)
+ else:
+ return NoOpTransform()
+
+ def __call__(self, aug_input):
+ do = self._rand_range() < self.prob
+ if do:
+ return self.aug(aug_input)
+ else:
+ return NoOpTransform()
+
+
+class RandomFlip(Augmentation):
+ """
+ Flip the image horizontally or vertically with the given probability.
+ """
+
+ def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
+ """
+ Args:
+ prob (float): probability of flip.
+ horizontal (boolean): whether to apply horizontal flipping
+ vertical (boolean): whether to apply vertical flipping
+ """
+ super().__init__()
+
+ if horizontal and vertical:
+ raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
+ if not horizontal and not vertical:
+ raise ValueError("At least one of horiz or vert has to be True!")
+ self._init(locals())
+
+ def get_transform(self, image):
+ h, w = image.shape[:2]
+ do = self._rand_range() < self.prob
+ if do:
+ if self.horizontal:
+ return HFlipTransform(w)
+ elif self.vertical:
+ return VFlipTransform(h)
+ else:
+ return NoOpTransform()
+
+
+class Resize(Augmentation):
+ """Resize image to a fixed target size"""
+
+ def __init__(self, shape, interp=Image.BILINEAR):
+ """
+ Args:
+ shape: (h, w) tuple or a int
+ interp: PIL interpolation method
+ """
+ if isinstance(shape, int):
+ shape = (shape, shape)
+ shape = tuple(shape)
+ self._init(locals())
+
+ def get_transform(self, image):
+ return ResizeTransform(
+ image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp
+ )
+
+
+class ResizeShortestEdge(Augmentation):
+ """
+ Resize the image while keeping the aspect ratio unchanged.
+ It attempts to scale the shorter edge to the given `short_edge_length`,
+ as long as the longer edge does not exceed `max_size`.
+ If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
+ """
+
+ @torch.jit.unused
+ def __init__(
+ self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
+ ):
+ """
+ Args:
+ short_edge_length (list[int]): If ``sample_style=="range"``,
+ a [min, max] interval from which to sample the shortest edge length.
+ If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
+ max_size (int): maximum allowed longest edge length.
+ sample_style (str): either "range" or "choice".
+ """
+ super().__init__()
+ assert sample_style in ["range", "choice"], sample_style
+
+ self.is_range = sample_style == "range"
+ if isinstance(short_edge_length, int):
+ short_edge_length = (short_edge_length, short_edge_length)
+ if self.is_range:
+ assert len(short_edge_length) == 2, (
+ "short_edge_length must be two values using 'range' sample style."
+ f" Got {short_edge_length}!"
+ )
+ self._init(locals())
+
+ @torch.jit.unused
+ def get_transform(self, image):
+ h, w = image.shape[:2]
+ if self.is_range:
+ size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
+ else:
+ size = np.random.choice(self.short_edge_length)
+ if size == 0:
+ return NoOpTransform()
+
+ newh, neww = ResizeShortestEdge.get_output_shape(h, w, size, self.max_size)
+ return ResizeTransform(h, w, newh, neww, self.interp)
+
+ @staticmethod
+ def get_output_shape(
+ oldh: int, oldw: int, short_edge_length: int, max_size: int
+ ) -> Tuple[int, int]:
+ """
+ Compute the output size given input size and target short edge length.
+ """
+ h, w = oldh, oldw
+ size = short_edge_length * 1.0
+ scale = size / min(h, w)
+ if h < w:
+ newh, neww = size, scale * w
+ else:
+ newh, neww = scale * h, size
+ if max(newh, neww) > max_size:
+ scale = max_size * 1.0 / max(newh, neww)
+ newh = newh * scale
+ neww = neww * scale
+ neww = int(neww + 0.5)
+ newh = int(newh + 0.5)
+ return (newh, neww)
+
+
+class ResizeScale(Augmentation):
+ """
+ Takes target size as input and randomly scales the given target size between `min_scale`
+ and `max_scale`. It then scales the input image such that it fits inside the scaled target
+ box, keeping the aspect ratio constant.
+ This implements the resize part of the Google's 'resize_and_crop' data augmentation:
+ https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127
+ """
+
+ def __init__(
+ self,
+ min_scale: float,
+ max_scale: float,
+ target_height: int,
+ target_width: int,
+ interp: int = Image.BILINEAR,
+ ):
+ """
+ Args:
+ min_scale: minimum image scale range.
+ max_scale: maximum image scale range.
+ target_height: target image height.
+ target_width: target image width.
+ interp: image interpolation method.
+ """
+ super().__init__()
+ self._init(locals())
+
+ def _get_resize(self, image: np.ndarray, scale: float) -> Transform:
+ input_size = image.shape[:2]
+
+ # Compute new target size given a scale.
+ target_size = (self.target_height, self.target_width)
+ target_scale_size = np.multiply(target_size, scale)
+
+ # Compute actual rescaling applied to input image and output size.
+ output_scale = np.minimum(
+ target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1]
+ )
+ output_size = np.round(np.multiply(input_size, output_scale)).astype(int)
+
+ return ResizeTransform(
+ input_size[0], input_size[1], output_size[0], output_size[1], self.interp
+ )
+
+ def get_transform(self, image: np.ndarray) -> Transform:
+ random_scale = np.random.uniform(self.min_scale, self.max_scale)
+ return self._get_resize(image, random_scale)
+
+
+class RandomRotation(Augmentation):
+ """
+ This method returns a copy of this image, rotated the given
+ number of degrees counter clockwise around the given center.
+ """
+
+ def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
+ """
+ Args:
+ angle (list[float]): If ``sample_style=="range"``,
+ a [min, max] interval from which to sample the angle (in degrees).
+ If ``sample_style=="choice"``, a list of angles to sample from
+ expand (bool): choose if the image should be resized to fit the whole
+ rotated image (default), or simply cropped
+ center (list[[float, float]]): If ``sample_style=="range"``,
+ a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
+ [0, 0] being the top left of the image and [1, 1] the bottom right.
+ If ``sample_style=="choice"``, a list of centers to sample from
+ Default: None, which means that the center of rotation is the center of the image
+ center has no effect if expand=True because it only affects shifting
+ """
+ super().__init__()
+ assert sample_style in ["range", "choice"], sample_style
+ self.is_range = sample_style == "range"
+ if isinstance(angle, (float, int)):
+ angle = (angle, angle)
+ if center is not None and isinstance(center[0], (float, int)):
+ center = (center, center)
+ self._init(locals())
+
+ def get_transform(self, image):
+ h, w = image.shape[:2]
+ center = None
+ if self.is_range:
+ angle = np.random.uniform(self.angle[0], self.angle[1])
+ if self.center is not None:
+ center = (
+ np.random.uniform(self.center[0][0], self.center[1][0]),
+ np.random.uniform(self.center[0][1], self.center[1][1]),
+ )
+ else:
+ angle = np.random.choice(self.angle)
+ if self.center is not None:
+ center = np.random.choice(self.center)
+
+ if center is not None:
+ center = (w * center[0], h * center[1]) # Convert to absolute coordinates
+
+ if angle % 360 == 0:
+ return NoOpTransform()
+
+ return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
+
+
+class FixedSizeCrop(Augmentation):
+ """
+ If `crop_size` is smaller than the input image size, then it uses a random crop of
+ the crop size. If `crop_size` is larger than the input image size, then it pads
+ the right and the bottom of the image to the crop size if `pad` is True, otherwise
+ it returns the smaller image.
+ """
+
+ def __init__(self, crop_size: Tuple[int], pad: bool = True, pad_value: float = 128.0):
+ """
+ Args:
+ crop_size: target image (height, width).
+ pad: if True, will pad images smaller than `crop_size` up to `crop_size`
+ pad_value: the padding value.
+ """
+ super().__init__()
+ self._init(locals())
+
+ def _get_crop(self, image: np.ndarray) -> Transform:
+ # Compute the image scale and scaled size.
+ input_size = image.shape[:2]
+ output_size = self.crop_size
+
+ # Add random crop if the image is scaled up.
+ max_offset = np.subtract(input_size, output_size)
+ max_offset = np.maximum(max_offset, 0)
+ offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0))
+ offset = np.round(offset).astype(int)
+ return CropTransform(
+ offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0]
+ )
+
+ def _get_pad(self, image: np.ndarray) -> Transform:
+ # Compute the image scale and scaled size.
+ input_size = image.shape[:2]
+ output_size = self.crop_size
+
+ # Add padding if the image is scaled down.
+ pad_size = np.subtract(output_size, input_size)
+ pad_size = np.maximum(pad_size, 0)
+ original_size = np.minimum(input_size, output_size)
+ return PadTransform(
+ 0, 0, pad_size[1], pad_size[0], original_size[1], original_size[0], self.pad_value
+ )
+
+ def get_transform(self, image: np.ndarray) -> TransformList:
+ transforms = [self._get_crop(image)]
+ if self.pad:
+ transforms.append(self._get_pad(image))
+ return TransformList(transforms)
+
+
+class RandomCrop(Augmentation):
+ """
+ Randomly crop a rectangle region out of an image.
+ """
+
+ def __init__(self, crop_type: str, crop_size):
+ """
+ Args:
+ crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
+ crop_size (tuple[float, float]): two floats, explained below.
+
+ - "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of
+ size (H, W). crop size should be in (0, 1]
+ - "relative_range": uniformly sample two values from [crop_size[0], 1]
+ and [crop_size[1]], 1], and use them as in "relative" crop type.
+ - "absolute" crop a (crop_size[0], crop_size[1]) region from input image.
+ crop_size must be smaller than the input image size.
+ - "absolute_range", for an input of size (H, W), uniformly sample H_crop in
+ [crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])].
+ Then crop a region (H_crop, W_crop).
+ """
+ # TODO style of relative_range and absolute_range are not consistent:
+ # one takes (h, w) but another takes (min, max)
+ super().__init__()
+ assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
+ self._init(locals())
+
+ def get_transform(self, image):
+ h, w = image.shape[:2]
+ croph, cropw = self.get_crop_size((h, w))
+ assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
+ h0 = np.random.randint(h - croph + 1)
+ w0 = np.random.randint(w - cropw + 1)
+ return CropTransform(w0, h0, cropw, croph)
+
+ def get_crop_size(self, image_size):
+ """
+ Args:
+ image_size (tuple): height, width
+
+ Returns:
+ crop_size (tuple): height, width in absolute pixels
+ """
+ h, w = image_size
+ if self.crop_type == "relative":
+ ch, cw = self.crop_size
+ return int(h * ch + 0.5), int(w * cw + 0.5)
+ elif self.crop_type == "relative_range":
+ crop_size = np.asarray(self.crop_size, dtype=np.float32)
+ ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
+ return int(h * ch + 0.5), int(w * cw + 0.5)
+ elif self.crop_type == "absolute":
+ return (min(self.crop_size[0], h), min(self.crop_size[1], w))
+ elif self.crop_type == "absolute_range":
+ assert self.crop_size[0] <= self.crop_size[1]
+ ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
+ cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
+ return ch, cw
+ else:
+ raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
+
+
+class RandomCrop_CategoryAreaConstraint(Augmentation):
+ """
+ Similar to :class:`RandomCrop`, but find a cropping window such that no single category
+ occupies a ratio of more than `single_category_max_area` in semantic segmentation ground
+ truth, which can cause unstability in training. The function attempts to find such a valid
+ cropping window for at most 10 times.
+ """
+
+ def __init__(
+ self,
+ crop_type: str,
+ crop_size,
+ single_category_max_area: float = 1.0,
+ ignored_category: int = None,
+ ):
+ """
+ Args:
+ crop_type, crop_size: same as in :class:`RandomCrop`
+ single_category_max_area: the maximum allowed area ratio of a
+ category. Set to 1.0 to disable
+ ignored_category: allow this category in the semantic segmentation
+ ground truth to exceed the area ratio. Usually set to the category
+ that's ignored in training.
+ """
+ self.crop_aug = RandomCrop(crop_type, crop_size)
+ self._init(locals())
+
+ def get_transform(self, image, sem_seg):
+ if self.single_category_max_area >= 1.0:
+ return self.crop_aug.get_transform(image)
+ else:
+ h, w = sem_seg.shape
+ for _ in range(10):
+ crop_size = self.crop_aug.get_crop_size((h, w))
+ y0 = np.random.randint(h - crop_size[0] + 1)
+ x0 = np.random.randint(w - crop_size[1] + 1)
+ sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]]
+ labels, cnt = np.unique(sem_seg_temp, return_counts=True)
+ if self.ignored_category is not None:
+ cnt = cnt[labels != self.ignored_category]
+ if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area:
+ break
+ crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0])
+ return crop_tfm
+
+
+class RandomExtent(Augmentation):
+ """
+ Outputs an image by cropping a random "subrect" of the source image.
+
+ The subrect can be parameterized to include pixels outside the source image,
+ in which case they will be set to zeros (i.e. black). The size of the output
+ image will vary with the size of the random subrect.
+ """
+
+ def __init__(self, scale_range, shift_range):
+ """
+ Args:
+ output_size (h, w): Dimensions of output image
+ scale_range (l, h): Range of input-to-output size scaling factor
+ shift_range (x, y): Range of shifts of the cropped subrect. The rect
+ is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
+ where (w, h) is the (width, height) of the input image. Set each
+ component to zero to crop at the image's center.
+ """
+ super().__init__()
+ self._init(locals())
+
+ def get_transform(self, image):
+ img_h, img_w = image.shape[:2]
+
+ # Initialize src_rect to fit the input image.
+ src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
+
+ # Apply a random scaling to the src_rect.
+ src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
+
+ # Apply a random shift to the coordinates origin.
+ src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
+ src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
+
+ # Map src_rect coordinates into image coordinates (center at corner).
+ src_rect[0::2] += 0.5 * img_w
+ src_rect[1::2] += 0.5 * img_h
+
+ return ExtentTransform(
+ src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
+ output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
+ )
+
+
+class RandomContrast(Augmentation):
+ """
+ Randomly transforms image contrast.
+
+ Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
+ - intensity < 1 will reduce contrast
+ - intensity = 1 will preserve the input image
+ - intensity > 1 will increase contrast
+
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
+ """
+
+ def __init__(self, intensity_min, intensity_max):
+ """
+ Args:
+ intensity_min (float): Minimum augmentation
+ intensity_max (float): Maximum augmentation
+ """
+ super().__init__()
+ self._init(locals())
+
+ def get_transform(self, image):
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
+ return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w)
+
+
+class RandomBrightness(Augmentation):
+ """
+ Randomly transforms image brightness.
+
+ Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
+ - intensity < 1 will reduce brightness
+ - intensity = 1 will preserve the input image
+ - intensity > 1 will increase brightness
+
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
+ """
+
+ def __init__(self, intensity_min, intensity_max):
+ """
+ Args:
+ intensity_min (float): Minimum augmentation
+ intensity_max (float): Maximum augmentation
+ """
+ super().__init__()
+ self._init(locals())
+
+ def get_transform(self, image):
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
+ return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
+
+
+class RandomSaturation(Augmentation):
+ """
+ Randomly transforms saturation of an RGB image.
+ Input images are assumed to have 'RGB' channel order.
+
+ Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
+ - intensity < 1 will reduce saturation (make the image more grayscale)
+ - intensity = 1 will preserve the input image
+ - intensity > 1 will increase saturation
+
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
+ """
+
+ def __init__(self, intensity_min, intensity_max):
+ """
+ Args:
+ intensity_min (float): Minimum augmentation (1 preserves input).
+ intensity_max (float): Maximum augmentation (1 preserves input).
+ """
+ super().__init__()
+ self._init(locals())
+
+ def get_transform(self, image):
+ assert image.shape[-1] == 3, "RandomSaturation only works on RGB images"
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
+ grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
+ return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
+
+
+class RandomLighting(Augmentation):
+ """
+ The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
+ Input images are assumed to have 'RGB' channel order.
+
+ The degree of color jittering is randomly sampled via a normal distribution,
+ with standard deviation given by the scale parameter.
+ """
+
+ def __init__(self, scale):
+ """
+ Args:
+ scale (float): Standard deviation of principal component weighting.
+ """
+ super().__init__()
+ self._init(locals())
+ self.eigen_vecs = np.array(
+ [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
+ )
+ self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
+
+ def get_transform(self, image):
+ assert image.shape[-1] == 3, "RandomLighting only works on RGB images"
+ weights = np.random.normal(scale=self.scale, size=3)
+ return BlendTransform(
+ src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
+ )
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/transform.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..de44b991d7ab0d920ffb769e1402f08e358d37f7
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/data/transforms/transform.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+See "Data Augmentation" tutorial for an overview of the system:
+https://detectron2.readthedocs.io/tutorials/augmentation.html
+"""
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from fvcore.transforms.transform import (
+ CropTransform,
+ HFlipTransform,
+ NoOpTransform,
+ Transform,
+ TransformList,
+)
+from PIL import Image
+
+try:
+ import cv2 # noqa
+except ImportError:
+ # OpenCV is an optional dependency at the moment
+ pass
+
+__all__ = [
+ "ExtentTransform",
+ "ResizeTransform",
+ "RotationTransform",
+ "ColorTransform",
+ "PILColorTransform",
+]
+
+
+class ExtentTransform(Transform):
+ """
+ Extracts a subregion from the source image and scales it to the output size.
+
+ The fill color is used to map pixels from the source rect that fall outside
+ the source image.
+
+ See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
+ """
+
+ def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):
+ """
+ Args:
+ src_rect (x0, y0, x1, y1): src coordinates
+ output_size (h, w): dst image size
+ interp: PIL interpolation methods
+ fill: Fill color used when src_rect extends outside image
+ """
+ super().__init__()
+ self._set_attributes(locals())
+
+ def apply_image(self, img, interp=None):
+ h, w = self.output_size
+ if len(img.shape) > 2 and img.shape[2] == 1:
+ pil_image = Image.fromarray(img[:, :, 0], mode="L")
+ else:
+ pil_image = Image.fromarray(img)
+ pil_image = pil_image.transform(
+ size=(w, h),
+ method=Image.EXTENT,
+ data=self.src_rect,
+ resample=interp if interp else self.interp,
+ fill=self.fill,
+ )
+ ret = np.asarray(pil_image)
+ if len(img.shape) > 2 and img.shape[2] == 1:
+ ret = np.expand_dims(ret, -1)
+ return ret
+
+ def apply_coords(self, coords):
+ # Transform image center from source coordinates into output coordinates
+ # and then map the new origin to the corner of the output image.
+ h, w = self.output_size
+ x0, y0, x1, y1 = self.src_rect
+ new_coords = coords.astype(np.float32)
+ new_coords[:, 0] -= 0.5 * (x0 + x1)
+ new_coords[:, 1] -= 0.5 * (y0 + y1)
+ new_coords[:, 0] *= w / (x1 - x0)
+ new_coords[:, 1] *= h / (y1 - y0)
+ new_coords[:, 0] += 0.5 * w
+ new_coords[:, 1] += 0.5 * h
+ return new_coords
+
+ def apply_segmentation(self, segmentation):
+ segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
+ return segmentation
+
+
+class ResizeTransform(Transform):
+ """
+ Resize the image to a target size.
+ """
+
+ def __init__(self, h, w, new_h, new_w, interp=None):
+ """
+ Args:
+ h, w (int): original image size
+ new_h, new_w (int): new image size
+ interp: PIL interpolation methods, defaults to bilinear.
+ """
+ # TODO decide on PIL vs opencv
+ super().__init__()
+ if interp is None:
+ interp = Image.BILINEAR
+ self._set_attributes(locals())
+
+ def apply_image(self, img, interp=None):
+ assert img.shape[:2] == (self.h, self.w)
+ assert len(img.shape) <= 4
+ interp_method = interp if interp is not None else self.interp
+
+ if img.dtype == np.uint8:
+ if len(img.shape) > 2 and img.shape[2] == 1:
+ pil_image = Image.fromarray(img[:, :, 0], mode="L")
+ else:
+ pil_image = Image.fromarray(img)
+ pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
+ ret = np.asarray(pil_image)
+ if len(img.shape) > 2 and img.shape[2] == 1:
+ ret = np.expand_dims(ret, -1)
+ else:
+ # PIL only supports uint8
+ if any(x < 0 for x in img.strides):
+ img = np.ascontiguousarray(img)
+ img = torch.from_numpy(img)
+ shape = list(img.shape)
+ shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
+ img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
+ _PIL_RESIZE_TO_INTERPOLATE_MODE = {
+ Image.NEAREST: "nearest",
+ Image.BILINEAR: "bilinear",
+ Image.BICUBIC: "bicubic",
+ }
+ mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method]
+ align_corners = None if mode == "nearest" else False
+ img = F.interpolate(
+ img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners
+ )
+ shape[:2] = (self.new_h, self.new_w)
+ ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
+
+ return ret
+
+ def apply_coords(self, coords):
+ coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
+ coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
+ return coords
+
+ def apply_segmentation(self, segmentation):
+ segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
+ return segmentation
+
+ def inverse(self):
+ return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
+
+
+class RotationTransform(Transform):
+ """
+ This method returns a copy of this image, rotated the given
+ number of degrees counter clockwise around its center.
+ """
+
+ def __init__(self, h, w, angle, expand=True, center=None, interp=None):
+ """
+ Args:
+ h, w (int): original image size
+ angle (float): degrees for rotation
+ expand (bool): choose if the image should be resized to fit the whole
+ rotated image (default), or simply cropped
+ center (tuple (width, height)): coordinates of the rotation center
+ if left to None, the center will be fit to the center of each image
+ center has no effect if expand=True because it only affects shifting
+ interp: cv2 interpolation method, default cv2.INTER_LINEAR
+ """
+ super().__init__()
+ image_center = np.array((w / 2, h / 2))
+ if center is None:
+ center = image_center
+ if interp is None:
+ interp = cv2.INTER_LINEAR
+ abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
+ if expand:
+ # find the new width and height bounds
+ bound_w, bound_h = np.rint(
+ [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
+ ).astype(int)
+ else:
+ bound_w, bound_h = w, h
+
+ self._set_attributes(locals())
+ self.rm_coords = self.create_rotation_matrix()
+ # Needed because of this problem https://github.com/opencv/opencv/issues/11784
+ self.rm_image = self.create_rotation_matrix(offset=-0.5)
+
+ def apply_image(self, img, interp=None):
+ """
+ img should be a numpy array, formatted as Height * Width * Nchannels
+ """
+ if len(img) == 0 or self.angle % 360 == 0:
+ return img
+ assert img.shape[:2] == (self.h, self.w)
+ interp = interp if interp is not None else self.interp
+ return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
+
+ def apply_coords(self, coords):
+ """
+ coords should be a N * 2 array-like, containing N couples of (x, y) points
+ """
+ coords = np.asarray(coords, dtype=float)
+ if len(coords) == 0 or self.angle % 360 == 0:
+ return coords
+ return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]
+
+ def apply_segmentation(self, segmentation):
+ segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
+ return segmentation
+
+ def create_rotation_matrix(self, offset=0):
+ center = (self.center[0] + offset, self.center[1] + offset)
+ rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
+ if self.expand:
+ # Find the coordinates of the center of rotation in the new image
+ # The only point for which we know the future coordinates is the center of the image
+ rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
+ new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
+ # shift the rotation center to the new coordinates
+ rm[:, 2] += new_center
+ return rm
+
+ def inverse(self):
+ """
+ The inverse is to rotate it back with expand, and crop to get the original shape.
+ """
+ if not self.expand: # Not possible to inverse if a part of the image is lost
+ raise NotImplementedError()
+ rotation = RotationTransform(
+ self.bound_h, self.bound_w, -self.angle, True, None, self.interp
+ )
+ crop = CropTransform(
+ (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
+ )
+ return TransformList([rotation, crop])
+
+
+class ColorTransform(Transform):
+ """
+ Generic wrapper for any photometric transforms.
+ These transformations should only affect the color space and
+ not the coordinate space of the image (e.g. annotation
+ coordinates such as bounding boxes should not be changed)
+ """
+
+ def __init__(self, op):
+ """
+ Args:
+ op (Callable): operation to be applied to the image,
+ which takes in an ndarray and returns an ndarray.
+ """
+ if not callable(op):
+ raise ValueError("op parameter should be callable")
+ super().__init__()
+ self._set_attributes(locals())
+
+ def apply_image(self, img):
+ return self.op(img)
+
+ def apply_coords(self, coords):
+ return coords
+
+ def inverse(self):
+ return NoOpTransform()
+
+ def apply_segmentation(self, segmentation):
+ return segmentation
+
+
+class PILColorTransform(ColorTransform):
+ """
+ Generic wrapper for PIL Photometric image transforms,
+ which affect the color space and not the coordinate
+ space of the image
+ """
+
+ def __init__(self, op):
+ """
+ Args:
+ op (Callable): operation to be applied to the image,
+ which takes in a PIL Image and returns a transformed
+ PIL Image.
+ For reference on possible operations see:
+ - https://pillow.readthedocs.io/en/stable/
+ """
+ if not callable(op):
+ raise ValueError("op parameter should be callable")
+ super().__init__(op)
+
+ def apply_image(self, img):
+ img = Image.fromarray(img)
+ return np.asarray(super().apply_image(img))
+
+
+def HFlip_rotated_box(transform, rotated_boxes):
+ """
+ Apply the horizontal flip transform on rotated boxes.
+
+ Args:
+ rotated_boxes (ndarray): Nx5 floating point array of
+ (x_center, y_center, width, height, angle_degrees) format
+ in absolute coordinates.
+ """
+ # Transform x_center
+ rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
+ # Transform angle
+ rotated_boxes[:, 4] = -rotated_boxes[:, 4]
+ return rotated_boxes
+
+
+def Resize_rotated_box(transform, rotated_boxes):
+ """
+ Apply the resizing transform on rotated boxes. For details of how these (approximation)
+ formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
+
+ Args:
+ rotated_boxes (ndarray): Nx5 floating point array of
+ (x_center, y_center, width, height, angle_degrees) format
+ in absolute coordinates.
+ """
+ scale_factor_x = transform.new_w * 1.0 / transform.w
+ scale_factor_y = transform.new_h * 1.0 / transform.h
+ rotated_boxes[:, 0] *= scale_factor_x
+ rotated_boxes[:, 1] *= scale_factor_y
+ theta = rotated_boxes[:, 4] * np.pi / 180.0
+ c = np.cos(theta)
+ s = np.sin(theta)
+ rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
+ rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
+ rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
+
+ return rotated_boxes
+
+
+HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
+ResizeTransform.register_type("rotated_box", Resize_rotated_box)
+
+# not necessary any more with latest fvcore
+NoOpTransform.register_type("rotated_box", lambda t, x: x)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..08a61572b4c7d09c8d400e903a96cbf5b2cc4763
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .launch import *
+from .train_loop import *
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
+
+
+# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__)
+# but still make them available here
+from .hooks import *
+from .defaults import *
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/defaults.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/defaults.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc3faa15550a348dbe1445f7c7c91b26ba59d01b
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/defaults.py
@@ -0,0 +1,715 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+This file contains components with some default boilerplate logic user may need
+in training / testing. They will not work for everyone, but many users may find them useful.
+
+The behavior of functions/classes in this file is subject to change,
+since they are meant to represent the "common default behavior" people need in their projects.
+"""
+
+import argparse
+import logging
+import os
+import sys
+import weakref
+from collections import OrderedDict
+from typing import Optional
+import torch
+from fvcore.nn.precise_bn import get_bn_modules
+from omegaconf import OmegaConf
+from torch.nn.parallel import DistributedDataParallel
+
+import detectron2.data.transforms as T
+from detectron2.checkpoint import DetectionCheckpointer
+from detectron2.config import CfgNode, LazyConfig
+from detectron2.data import (
+ MetadataCatalog,
+ build_detection_test_loader,
+ build_detection_train_loader,
+)
+from detectron2.evaluation import (
+ DatasetEvaluator,
+ inference_on_dataset,
+ print_csv_format,
+ verify_results,
+)
+from detectron2.modeling import build_model
+from detectron2.solver import build_lr_scheduler, build_optimizer
+from detectron2.utils import comm
+from detectron2.utils.collect_env import collect_env_info
+from detectron2.utils.env import seed_all_rng
+from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import setup_logger
+
+from . import hooks
+from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase
+
+__all__ = [
+ "create_ddp_model",
+ "default_argument_parser",
+ "default_setup",
+ "default_writers",
+ "DefaultPredictor",
+ "DefaultTrainer",
+]
+
+
+def create_ddp_model(model, *, fp16_compression=False, **kwargs):
+ """
+ Create a DistributedDataParallel model if there are >1 processes.
+
+ Args:
+ model: a torch.nn.Module
+ fp16_compression: add fp16 compression hooks to the ddp object.
+ See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
+ kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
+ """ # noqa
+ if comm.get_world_size() == 1:
+ return model
+ if "device_ids" not in kwargs:
+ kwargs["device_ids"] = [comm.get_local_rank()]
+ ddp = DistributedDataParallel(model, **kwargs)
+ if fp16_compression:
+ from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
+
+ ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
+ return ddp
+
+
+def default_argument_parser(epilog=None):
+ """
+ Create a parser with some common arguments used by detectron2 users.
+
+ Args:
+ epilog (str): epilog passed to ArgumentParser describing the usage.
+
+ Returns:
+ argparse.ArgumentParser:
+ """
+ parser = argparse.ArgumentParser(
+ epilog=epilog
+ or f"""
+Examples:
+
+Run on single machine:
+ $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
+
+Change some config options:
+ $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
+
+Run on multiple machines:
+ (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url [--other-flags]
+ (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url [--other-flags]
+""",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
+ parser.add_argument(
+ "--resume",
+ action="store_true",
+ help="Whether to attempt to resume from the checkpoint directory. "
+ "See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
+ )
+ parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
+ parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
+ parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
+ parser.add_argument(
+ "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
+ )
+
+ # PyTorch still may leave orphan processes in multi-gpu training.
+ # Therefore we use a deterministic way to obtain port,
+ # so that users are aware of orphan processes by seeing the port occupied.
+ port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
+ parser.add_argument(
+ "--dist-url",
+ default="tcp://127.0.0.1:{}".format(port),
+ help="initialization URL for pytorch distributed backend. See "
+ "https://pytorch.org/docs/stable/distributed.html for details.",
+ )
+ parser.add_argument(
+ "opts",
+ help="""
+Modify config options at the end of the command. For Yacs configs, use
+space-separated "PATH.KEY VALUE" pairs.
+For python-based LazyConfig, use "path.key=value".
+ """.strip(),
+ default=None,
+ nargs=argparse.REMAINDER,
+ )
+ return parser
+
+
+def _try_get_key(cfg, *keys, default=None):
+ """
+ Try select keys from cfg until the first key that exists. Otherwise return default.
+ """
+ if isinstance(cfg, CfgNode):
+ cfg = OmegaConf.create(cfg.dump())
+ for k in keys:
+ none = object()
+ p = OmegaConf.select(cfg, k, default=none)
+ if p is not none:
+ return p
+ return default
+
+
+def _highlight(code, filename):
+ try:
+ import pygments
+ except ImportError:
+ return code
+
+ from pygments.lexers import Python3Lexer, YamlLexer
+ from pygments.formatters import Terminal256Formatter
+
+ lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
+ code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
+ return code
+
+
+def default_setup(cfg, args):
+ """
+ Perform some basic common setups at the beginning of a job, including:
+
+ 1. Set up the detectron2 logger
+ 2. Log basic information about environment, cmdline arguments, and config
+ 3. Backup the config to the output directory
+
+ Args:
+ cfg (CfgNode or omegaconf.DictConfig): the full config to be used
+ args (argparse.NameSpace): the command line arguments to be logged
+ """
+ output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
+ if comm.is_main_process() and output_dir:
+ PathManager.mkdirs(output_dir)
+
+ rank = comm.get_rank()
+ setup_logger(output_dir, distributed_rank=rank, name="fvcore")
+ logger = setup_logger(output_dir, distributed_rank=rank)
+
+ logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
+ logger.info("Environment info:\n" + collect_env_info())
+
+ logger.info("Command line arguments: " + str(args))
+ if hasattr(args, "config_file") and args.config_file != "":
+ logger.info(
+ "Contents of args.config_file={}:\n{}".format(
+ args.config_file,
+ _highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
+ )
+ )
+
+ if comm.is_main_process() and output_dir:
+ # Note: some of our scripts may expect the existence of
+ # config.yaml in output directory
+ path = os.path.join(output_dir, "config.yaml")
+ if isinstance(cfg, CfgNode):
+ logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
+ with PathManager.open(path, "w") as f:
+ f.write(cfg.dump())
+ else:
+ LazyConfig.save(cfg, path)
+ logger.info("Full config saved to {}".format(path))
+
+ # make sure each worker has a different, yet deterministic seed if specified
+ seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
+ seed_all_rng(None if seed < 0 else seed + rank)
+
+ # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
+ # typical validation set.
+ if not (hasattr(args, "eval_only") and args.eval_only):
+ torch.backends.cudnn.benchmark = _try_get_key(
+ cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
+ )
+
+
+def default_writers(output_dir: str, max_iter: Optional[int] = None):
+ """
+ Build a list of :class:`EventWriter` to be used.
+ It now consists of a :class:`CommonMetricPrinter`,
+ :class:`TensorboardXWriter` and :class:`JSONWriter`.
+
+ Args:
+ output_dir: directory to store JSON metrics and tensorboard events
+ max_iter: the total number of iterations
+
+ Returns:
+ list[EventWriter]: a list of :class:`EventWriter` objects.
+ """
+ PathManager.mkdirs(output_dir)
+ return [
+ # It may not always print what you want to see, since it prints "common" metrics only.
+ CommonMetricPrinter(max_iter),
+ JSONWriter(os.path.join(output_dir, "metrics.json")),
+ TensorboardXWriter(output_dir),
+ ]
+
+
+class DefaultPredictor:
+ """
+ Create a simple end-to-end predictor with the given config that runs on
+ single device for a single input image.
+
+ Compared to using the model directly, this class does the following additions:
+
+ 1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
+ 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
+ 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
+ 4. Take one input image and produce a single output, instead of a batch.
+
+ This is meant for simple demo purposes, so it does the above steps automatically.
+ This is not meant for benchmarks or running complicated inference logic.
+ If you'd like to do anything more complicated, please refer to its source code as
+ examples to build and use the model manually.
+
+ Attributes:
+ metadata (Metadata): the metadata of the underlying dataset, obtained from
+ cfg.DATASETS.TEST.
+
+ Examples:
+ ::
+ pred = DefaultPredictor(cfg)
+ inputs = cv2.imread("input.jpg")
+ outputs = pred(inputs)
+ """
+
+ def __init__(self, cfg):
+ self.cfg = cfg.clone() # cfg can be modified by model
+ self.model = build_model(self.cfg)
+ self.model.eval()
+ if len(cfg.DATASETS.TEST):
+ self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
+
+ checkpointer = DetectionCheckpointer(self.model)
+ checkpointer.load(cfg.MODEL.WEIGHTS)
+
+ self.aug = T.ResizeShortestEdge(
+ [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
+ )
+
+ self.input_format = cfg.INPUT.FORMAT
+ assert self.input_format in ["RGB", "BGR"], self.input_format
+
+ def __call__(self, original_image):
+ """
+ Args:
+ original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
+
+ Returns:
+ predictions (dict):
+ the output of the model for one image only.
+ See :doc:`/tutorials/models` for details about the format.
+ """
+ with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
+ # Apply pre-processing to image.
+ if self.input_format == "RGB":
+ # whether the model expects BGR inputs or RGB
+ original_image = original_image[:, :, ::-1]
+ height, width = original_image.shape[:2]
+ image = self.aug.get_transform(original_image).apply_image(original_image)
+ image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
+
+ inputs = {"image": image, "height": height, "width": width}
+ predictions = self.model([inputs])[0]
+ return predictions
+
+
+class DefaultTrainer(TrainerBase):
+ """
+ A trainer with default training logic. It does the following:
+
+ 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
+ defined by the given config. Create a LR scheduler defined by the config.
+ 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
+ `resume_or_load` is called.
+ 3. Register a few common hooks defined by the config.
+
+ It is created to simplify the **standard model training workflow** and reduce code boilerplate
+ for users who only need the standard training workflow, with standard features.
+ It means this class makes *many assumptions* about your training logic that
+ may easily become invalid in a new research. In fact, any assumptions beyond those made in the
+ :class:`SimpleTrainer` are too much for research.
+
+ The code of this class has been annotated about restrictive assumptions it makes.
+ When they do not work for you, you're encouraged to:
+
+ 1. Overwrite methods of this class, OR:
+ 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
+ nothing else. You can then add your own hooks if needed. OR:
+ 3. Write your own training loop similar to `tools/plain_train_net.py`.
+
+ See the :doc:`/tutorials/training` tutorials for more details.
+
+ Note that the behavior of this class, like other functions/classes in
+ this file, is not stable, since it is meant to represent the "common default behavior".
+ It is only guaranteed to work well with the standard models and training workflow in detectron2.
+ To obtain more stable behavior, write your own training logic with other public APIs.
+
+ Examples:
+ ::
+ trainer = DefaultTrainer(cfg)
+ trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
+ trainer.train()
+
+ Attributes:
+ scheduler:
+ checkpointer (DetectionCheckpointer):
+ cfg (CfgNode):
+ """
+
+ def __init__(self, cfg):
+ """
+ Args:
+ cfg (CfgNode):
+ """
+ super().__init__()
+ logger = logging.getLogger("detectron2")
+ if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
+ setup_logger()
+ cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
+
+ # Assume these objects must be constructed in this order.
+ model = self.build_model(cfg)
+ optimizer = self.build_optimizer(cfg, model)
+ data_loader = self.build_train_loader(cfg)
+
+ model = create_ddp_model(model, broadcast_buffers=False)
+ self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
+ model, data_loader, optimizer
+ )
+
+ self.scheduler = self.build_lr_scheduler(cfg, optimizer)
+ self.checkpointer = DetectionCheckpointer(
+ # Assume you want to save checkpoints together with logs/statistics
+ model,
+ cfg.OUTPUT_DIR,
+ trainer=weakref.proxy(self),
+ )
+ self.start_iter = 0
+ self.max_iter = cfg.SOLVER.MAX_ITER
+ self.cfg = cfg
+
+ self.register_hooks(self.build_hooks())
+
+ def resume_or_load(self, resume=True):
+ """
+ If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
+ a `last_checkpoint` file), resume from the file. Resuming means loading all
+ available states (eg. optimizer and scheduler) and update iteration counter
+ from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
+
+ Otherwise, this is considered as an independent training. The method will load model
+ weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
+ from iteration 0.
+
+ Args:
+ resume (bool): whether to do resume or not
+ """
+ self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
+ if resume and self.checkpointer.has_checkpoint():
+ # The checkpoint stores the training iteration that just finished, thus we start
+ # at the next iteration
+ self.start_iter = self.iter + 1
+
+ def build_hooks(self):
+ """
+ Build a list of default hooks, including timing, evaluation,
+ checkpointing, lr scheduling, precise BN, writing events.
+
+ Returns:
+ list[HookBase]:
+ """
+ cfg = self.cfg.clone()
+ cfg.defrost()
+ cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
+
+ ret = [
+ hooks.IterationTimer(),
+ hooks.LRScheduler(),
+ hooks.PreciseBN(
+ # Run at the same freq as (but before) evaluation.
+ cfg.TEST.EVAL_PERIOD,
+ self.model,
+ # Build a new data loader to not affect training
+ self.build_train_loader(cfg),
+ cfg.TEST.PRECISE_BN.NUM_ITER,
+ )
+ if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
+ else None,
+ ]
+
+ # Do PreciseBN before checkpointer, because it updates the model and need to
+ # be saved by checkpointer.
+ # This is not always the best: if checkpointing has a different frequency,
+ # some checkpoints may have more precise statistics than others.
+ if comm.is_main_process():
+ ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
+
+ def test_and_save_results():
+ self._last_eval_results = self.test(self.cfg, self.model)
+ return self._last_eval_results
+
+ # Do evaluation after checkpointer, because then if it fails,
+ # we can use the saved checkpoint to debug.
+ ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
+
+ if comm.is_main_process():
+ # Here the default print/log frequency of each writer is used.
+ # run writers in the end, so that evaluation metrics are written
+ ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
+ return ret
+
+ def build_writers(self):
+ """
+ Build a list of writers to be used using :func:`default_writers()`.
+ If you'd like a different list of writers, you can overwrite it in
+ your trainer.
+
+ Returns:
+ list[EventWriter]: a list of :class:`EventWriter` objects.
+ """
+ return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
+
+ def train(self):
+ """
+ Run training.
+
+ Returns:
+ OrderedDict of results, if evaluation is enabled. Otherwise None.
+ """
+ super().train(self.start_iter, self.max_iter)
+ if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
+ assert hasattr(
+ self, "_last_eval_results"
+ ), "No evaluation results obtained during training!"
+ verify_results(self.cfg, self._last_eval_results)
+ return self._last_eval_results
+
+ def run_step(self):
+ self._trainer.iter = self.iter
+ self._trainer.run_step()
+
+ def state_dict(self):
+ ret = super().state_dict()
+ ret["_trainer"] = self._trainer.state_dict()
+ return ret
+
+ def load_state_dict(self, state_dict):
+ super().load_state_dict(state_dict)
+ self._trainer.load_state_dict(state_dict["_trainer"])
+
+ @classmethod
+ def build_model(cls, cfg):
+ """
+ Returns:
+ torch.nn.Module:
+
+ It now calls :func:`detectron2.modeling.build_model`.
+ Overwrite it if you'd like a different model.
+ """
+ model = build_model(cfg)
+ logger = logging.getLogger(__name__)
+ logger.info("Model:\n{}".format(model))
+ return model
+
+ @classmethod
+ def build_optimizer(cls, cfg, model):
+ """
+ Returns:
+ torch.optim.Optimizer:
+
+ It now calls :func:`detectron2.solver.build_optimizer`.
+ Overwrite it if you'd like a different optimizer.
+ """
+ return build_optimizer(cfg, model)
+
+ @classmethod
+ def build_lr_scheduler(cls, cfg, optimizer):
+ """
+ It now calls :func:`detectron2.solver.build_lr_scheduler`.
+ Overwrite it if you'd like a different scheduler.
+ """
+ return build_lr_scheduler(cfg, optimizer)
+
+ @classmethod
+ def build_train_loader(cls, cfg):
+ """
+ Returns:
+ iterable
+
+ It now calls :func:`detectron2.data.build_detection_train_loader`.
+ Overwrite it if you'd like a different data loader.
+ """
+ return build_detection_train_loader(cfg)
+
+ @classmethod
+ def build_test_loader(cls, cfg, dataset_name):
+ """
+ Returns:
+ iterable
+
+ It now calls :func:`detectron2.data.build_detection_test_loader`.
+ Overwrite it if you'd like a different data loader.
+ """
+ return build_detection_test_loader(cfg, dataset_name)
+
+ @classmethod
+ def build_evaluator(cls, cfg, dataset_name):
+ """
+ Returns:
+ DatasetEvaluator or None
+
+ It is not implemented by default.
+ """
+ raise NotImplementedError(
+ """
+If you want DefaultTrainer to automatically run evaluation,
+please implement `build_evaluator()` in subclasses (see train_net.py for example).
+Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
+"""
+ )
+
+ @classmethod
+ def test(cls, cfg, model, evaluators=None):
+ """
+ Evaluate the given model. The given model is expected to already contain
+ weights to evaluate.
+
+ Args:
+ cfg (CfgNode):
+ model (nn.Module):
+ evaluators (list[DatasetEvaluator] or None): if None, will call
+ :meth:`build_evaluator`. Otherwise, must have the same length as
+ ``cfg.DATASETS.TEST``.
+
+ Returns:
+ dict: a dict of result metrics
+ """
+ logger = logging.getLogger(__name__)
+ if isinstance(evaluators, DatasetEvaluator):
+ evaluators = [evaluators]
+ if evaluators is not None:
+ assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
+ len(cfg.DATASETS.TEST), len(evaluators)
+ )
+
+ results = OrderedDict()
+ for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
+ data_loader = cls.build_test_loader(cfg, dataset_name)
+ # When evaluators are passed in as arguments,
+ # implicitly assume that evaluators can be created before data_loader.
+ if evaluators is not None:
+ evaluator = evaluators[idx]
+ else:
+ try:
+ evaluator = cls.build_evaluator(cfg, dataset_name)
+ except NotImplementedError:
+ logger.warn(
+ "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
+ "or implement its `build_evaluator` method."
+ )
+ results[dataset_name] = {}
+ continue
+ results_i = inference_on_dataset(model, data_loader, evaluator)
+ results[dataset_name] = results_i
+ if comm.is_main_process():
+ assert isinstance(
+ results_i, dict
+ ), "Evaluator must return a dict on the main process. Got {} instead.".format(
+ results_i
+ )
+ logger.info("Evaluation results for {} in csv format:".format(dataset_name))
+ print_csv_format(results_i)
+
+ if len(results) == 1:
+ results = list(results.values())[0]
+ return results
+
+ @staticmethod
+ def auto_scale_workers(cfg, num_workers: int):
+ """
+ When the config is defined for certain number of workers (according to
+ ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
+ workers currently in use, returns a new cfg where the total batch size
+ is scaled so that the per-GPU batch size stays the same as the
+ original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
+
+ Other config options are also scaled accordingly:
+ * training steps and warmup steps are scaled inverse proportionally.
+ * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`.
+
+ For example, with the original config like the following:
+
+ .. code-block:: yaml
+
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.1
+ REFERENCE_WORLD_SIZE: 8
+ MAX_ITER: 5000
+ STEPS: (4000,)
+ CHECKPOINT_PERIOD: 1000
+
+ When this config is used on 16 GPUs instead of the reference number 8,
+ calling this method will return a new config with:
+
+ .. code-block:: yaml
+
+ IMS_PER_BATCH: 32
+ BASE_LR: 0.2
+ REFERENCE_WORLD_SIZE: 16
+ MAX_ITER: 2500
+ STEPS: (2000,)
+ CHECKPOINT_PERIOD: 500
+
+ Note that both the original config and this new config can be trained on 16 GPUs.
+ It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
+
+ Returns:
+ CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
+ """
+ old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
+ if old_world_size == 0 or old_world_size == num_workers:
+ return cfg
+ cfg = cfg.clone()
+ frozen = cfg.is_frozen()
+ cfg.defrost()
+
+ assert (
+ cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
+ ), "Invalid REFERENCE_WORLD_SIZE in config!"
+ scale = num_workers / old_world_size
+ bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
+ lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
+ max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
+ warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
+ cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
+ cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
+ cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
+ cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
+ logger = logging.getLogger(__name__)
+ logger.info(
+ f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
+ f"max_iter={max_iter}, warmup={warmup_iter}."
+ )
+
+ if frozen:
+ cfg.freeze()
+ return cfg
+
+
+# Access basic attributes from the underlying trainer
+for _attr in ["model", "data_loader", "optimizer"]:
+ setattr(
+ DefaultTrainer,
+ _attr,
+ property(
+ # getter
+ lambda self, x=_attr: getattr(self._trainer, x),
+ # setter
+ lambda self, value, x=_attr: setattr(self._trainer, x, value),
+ ),
+ )
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/hooks.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..52c321f979726b8aa89ba34874bc6729a75b70b4
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/hooks.py
@@ -0,0 +1,686 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import datetime
+import itertools
+import logging
+import math
+import operator
+import os
+import tempfile
+import time
+import warnings
+from collections import Counter
+import torch
+from fvcore.common.checkpoint import Checkpointer
+from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
+from fvcore.common.param_scheduler import ParamScheduler
+from fvcore.common.timer import Timer
+from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
+
+import detectron2.utils.comm as comm
+from detectron2.evaluation.testing import flatten_results_dict
+from detectron2.solver import LRMultiplier
+from detectron2.utils.events import EventStorage, EventWriter
+from detectron2.utils.file_io import PathManager
+
+from .train_loop import HookBase
+
+__all__ = [
+ "CallbackHook",
+ "IterationTimer",
+ "PeriodicWriter",
+ "PeriodicCheckpointer",
+ "BestCheckpointer",
+ "LRScheduler",
+ "AutogradProfiler",
+ "EvalHook",
+ "PreciseBN",
+ "TorchProfiler",
+ "TorchMemoryStats",
+]
+
+
+"""
+Implement some common hooks.
+"""
+
+
+class CallbackHook(HookBase):
+ """
+ Create a hook using callback functions provided by the user.
+ """
+
+ def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
+ """
+ Each argument is a function that takes one argument: the trainer.
+ """
+ self._before_train = before_train
+ self._before_step = before_step
+ self._after_step = after_step
+ self._after_train = after_train
+
+ def before_train(self):
+ if self._before_train:
+ self._before_train(self.trainer)
+
+ def after_train(self):
+ if self._after_train:
+ self._after_train(self.trainer)
+ # The functions may be closures that hold reference to the trainer
+ # Therefore, delete them to avoid circular reference.
+ del self._before_train, self._after_train
+ del self._before_step, self._after_step
+
+ def before_step(self):
+ if self._before_step:
+ self._before_step(self.trainer)
+
+ def after_step(self):
+ if self._after_step:
+ self._after_step(self.trainer)
+
+
+class IterationTimer(HookBase):
+ """
+ Track the time spent for each iteration (each run_step call in the trainer).
+ Print a summary in the end of training.
+
+ This hook uses the time between the call to its :meth:`before_step`
+ and :meth:`after_step` methods.
+ Under the convention that :meth:`before_step` of all hooks should only
+ take negligible amount of time, the :class:`IterationTimer` hook should be
+ placed at the beginning of the list of hooks to obtain accurate timing.
+ """
+
+ def __init__(self, warmup_iter=3):
+ """
+ Args:
+ warmup_iter (int): the number of iterations at the beginning to exclude
+ from timing.
+ """
+ self._warmup_iter = warmup_iter
+ self._step_timer = Timer()
+ self._start_time = time.perf_counter()
+ self._total_timer = Timer()
+
+ def before_train(self):
+ self._start_time = time.perf_counter()
+ self._total_timer.reset()
+ self._total_timer.pause()
+
+ def after_train(self):
+ logger = logging.getLogger(__name__)
+ total_time = time.perf_counter() - self._start_time
+ total_time_minus_hooks = self._total_timer.seconds()
+ hook_time = total_time - total_time_minus_hooks
+
+ num_iter = self.trainer.storage.iter + 1 - self.trainer.start_iter - self._warmup_iter
+
+ if num_iter > 0 and total_time_minus_hooks > 0:
+ # Speed is meaningful only after warmup
+ # NOTE this format is parsed by grep in some scripts
+ logger.info(
+ "Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
+ num_iter,
+ str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
+ total_time_minus_hooks / num_iter,
+ )
+ )
+
+ logger.info(
+ "Total training time: {} ({} on hooks)".format(
+ str(datetime.timedelta(seconds=int(total_time))),
+ str(datetime.timedelta(seconds=int(hook_time))),
+ )
+ )
+
+ def before_step(self):
+ self._step_timer.reset()
+ self._total_timer.resume()
+
+ def after_step(self):
+ # +1 because we're in after_step, the current step is done
+ # but not yet counted
+ iter_done = self.trainer.storage.iter - self.trainer.start_iter + 1
+ if iter_done >= self._warmup_iter:
+ sec = self._step_timer.seconds()
+ self.trainer.storage.put_scalars(time=sec)
+ else:
+ self._start_time = time.perf_counter()
+ self._total_timer.reset()
+
+ self._total_timer.pause()
+
+
+class PeriodicWriter(HookBase):
+ """
+ Write events to EventStorage (by calling ``writer.write()``) periodically.
+
+ It is executed every ``period`` iterations and after the last iteration.
+ Note that ``period`` does not affect how data is smoothed by each writer.
+ """
+
+ def __init__(self, writers, period=20):
+ """
+ Args:
+ writers (list[EventWriter]): a list of EventWriter objects
+ period (int):
+ """
+ self._writers = writers
+ for w in writers:
+ assert isinstance(w, EventWriter), w
+ self._period = period
+
+ def after_step(self):
+ if (self.trainer.iter + 1) % self._period == 0 or (
+ self.trainer.iter == self.trainer.max_iter - 1
+ ):
+ for writer in self._writers:
+ writer.write()
+
+ def after_train(self):
+ for writer in self._writers:
+ # If any new data is found (e.g. produced by other after_train),
+ # write them before closing
+ writer.write()
+ writer.close()
+
+
+class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
+ """
+ Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
+
+ Note that when used as a hook,
+ it is unable to save additional data other than what's defined
+ by the given `checkpointer`.
+
+ It is executed every ``period`` iterations and after the last iteration.
+ """
+
+ def before_train(self):
+ self.max_iter = self.trainer.max_iter
+
+ def after_step(self):
+ # No way to use **kwargs
+ self.step(self.trainer.iter)
+
+
+class BestCheckpointer(HookBase):
+ """
+ Checkpoints best weights based off given metric.
+
+ This hook should be used in conjunction to and executed after the hook
+ that produces the metric, e.g. `EvalHook`.
+ """
+
+ def __init__(
+ self,
+ eval_period: int,
+ checkpointer: Checkpointer,
+ val_metric: str,
+ mode: str = "max",
+ file_prefix: str = "model_best",
+ ) -> None:
+ """
+ Args:
+ eval_period (int): the period `EvalHook` is set to run.
+ checkpointer: the checkpointer object used to save checkpoints.
+ val_metric (str): validation metric to track for best checkpoint, e.g. "bbox/AP50"
+ mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be
+ maximized or minimized, e.g. for "bbox/AP50" it should be "max"
+ file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best"
+ """
+ self._logger = logging.getLogger(__name__)
+ self._period = eval_period
+ self._val_metric = val_metric
+ assert mode in [
+ "max",
+ "min",
+ ], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.'
+ if mode == "max":
+ self._compare = operator.gt
+ else:
+ self._compare = operator.lt
+ self._checkpointer = checkpointer
+ self._file_prefix = file_prefix
+ self.best_metric = None
+ self.best_iter = None
+
+ def _update_best(self, val, iteration):
+ if math.isnan(val) or math.isinf(val):
+ return False
+ self.best_metric = val
+ self.best_iter = iteration
+ return True
+
+ def _best_checking(self):
+ metric_tuple = self.trainer.storage.latest().get(self._val_metric)
+ if metric_tuple is None:
+ self._logger.warning(
+ f"Given val metric {self._val_metric} does not seem to be computed/stored."
+ "Will not be checkpointing based on it."
+ )
+ return
+ else:
+ latest_metric, metric_iter = metric_tuple
+
+ if self.best_metric is None:
+ if self._update_best(latest_metric, metric_iter):
+ additional_state = {"iteration": metric_iter}
+ self._checkpointer.save(f"{self._file_prefix}", **additional_state)
+ self._logger.info(
+ f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps"
+ )
+ elif self._compare(latest_metric, self.best_metric):
+ additional_state = {"iteration": metric_iter}
+ self._checkpointer.save(f"{self._file_prefix}", **additional_state)
+ self._logger.info(
+ f"Saved best model as latest eval score for {self._val_metric} is "
+ f"{latest_metric:0.5f}, better than last best score "
+ f"{self.best_metric:0.5f} @ iteration {self.best_iter}."
+ )
+ self._update_best(latest_metric, metric_iter)
+ else:
+ self._logger.info(
+ f"Not saving as latest eval score for {self._val_metric} is {latest_metric:0.5f}, "
+ f"not better than best score {self.best_metric:0.5f} @ iteration {self.best_iter}."
+ )
+
+ def after_step(self):
+ # same conditions as `EvalHook`
+ next_iter = self.trainer.iter + 1
+ if (
+ self._period > 0
+ and next_iter % self._period == 0
+ and next_iter != self.trainer.max_iter
+ ):
+ self._best_checking()
+
+ def after_train(self):
+ # same conditions as `EvalHook`
+ if self.trainer.iter + 1 >= self.trainer.max_iter:
+ self._best_checking()
+
+
+class LRScheduler(HookBase):
+ """
+ A hook which executes a torch builtin LR scheduler and summarizes the LR.
+ It is executed after every iteration.
+ """
+
+ def __init__(self, optimizer=None, scheduler=None):
+ """
+ Args:
+ optimizer (torch.optim.Optimizer):
+ scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler):
+ if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
+ in the optimizer.
+
+ If any argument is not given, will try to obtain it from the trainer.
+ """
+ self._optimizer = optimizer
+ self._scheduler = scheduler
+
+ def before_train(self):
+ self._optimizer = self._optimizer or self.trainer.optimizer
+ if isinstance(self.scheduler, ParamScheduler):
+ self._scheduler = LRMultiplier(
+ self._optimizer,
+ self.scheduler,
+ self.trainer.max_iter,
+ last_iter=self.trainer.iter - 1,
+ )
+ self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer)
+
+ @staticmethod
+ def get_best_param_group_id(optimizer):
+ # NOTE: some heuristics on what LR to summarize
+ # summarize the param group with most parameters
+ largest_group = max(len(g["params"]) for g in optimizer.param_groups)
+
+ if largest_group == 1:
+ # If all groups have one parameter,
+ # then find the most common initial LR, and use it for summary
+ lr_count = Counter([g["lr"] for g in optimizer.param_groups])
+ lr = lr_count.most_common()[0][0]
+ for i, g in enumerate(optimizer.param_groups):
+ if g["lr"] == lr:
+ return i
+ else:
+ for i, g in enumerate(optimizer.param_groups):
+ if len(g["params"]) == largest_group:
+ return i
+
+ def after_step(self):
+ lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
+ self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
+ self.scheduler.step()
+
+ @property
+ def scheduler(self):
+ return self._scheduler or self.trainer.scheduler
+
+ def state_dict(self):
+ if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
+ return self.scheduler.state_dict()
+ return {}
+
+ def load_state_dict(self, state_dict):
+ if isinstance(self.scheduler, torch.optim.lr_scheduler._LRScheduler):
+ logger = logging.getLogger(__name__)
+ logger.info("Loading scheduler from state_dict ...")
+ self.scheduler.load_state_dict(state_dict)
+
+
+class TorchProfiler(HookBase):
+ """
+ A hook which runs `torch.profiler.profile`.
+
+ Examples:
+ ::
+ hooks.TorchProfiler(
+ lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
+ )
+
+ The above example will run the profiler for iteration 10~20 and dump
+ results to ``OUTPUT_DIR``. We did not profile the first few iterations
+ because they are typically slower than the rest.
+ The result files can be loaded in the ``chrome://tracing`` page in chrome browser,
+ and the tensorboard visualizations can be visualized using
+ ``tensorboard --logdir OUTPUT_DIR/log``
+ """
+
+ def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True):
+ """
+ Args:
+ enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
+ and returns whether to enable the profiler.
+ It will be called once every step, and can be used to select which steps to profile.
+ output_dir (str): the output directory to dump tracing files.
+ activities (iterable): same as in `torch.profiler.profile`.
+ save_tensorboard (bool): whether to save tensorboard visualizations at (output_dir)/log/
+ """
+ self._enable_predicate = enable_predicate
+ self._activities = activities
+ self._output_dir = output_dir
+ self._save_tensorboard = save_tensorboard
+
+ def before_step(self):
+ if self._enable_predicate(self.trainer):
+ if self._save_tensorboard:
+ on_trace_ready = torch.profiler.tensorboard_trace_handler(
+ os.path.join(
+ self._output_dir,
+ "log",
+ "profiler-tensorboard-iter{}".format(self.trainer.iter),
+ ),
+ f"worker{comm.get_rank()}",
+ )
+ else:
+ on_trace_ready = None
+ self._profiler = torch.profiler.profile(
+ activities=self._activities,
+ on_trace_ready=on_trace_ready,
+ record_shapes=True,
+ profile_memory=True,
+ with_stack=True,
+ with_flops=True,
+ )
+ self._profiler.__enter__()
+ else:
+ self._profiler = None
+
+ def after_step(self):
+ if self._profiler is None:
+ return
+ self._profiler.__exit__(None, None, None)
+ if not self._save_tensorboard:
+ PathManager.mkdirs(self._output_dir)
+ out_file = os.path.join(
+ self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
+ )
+ if "://" not in out_file:
+ self._profiler.export_chrome_trace(out_file)
+ else:
+ # Support non-posix filesystems
+ with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
+ tmp_file = os.path.join(d, "tmp.json")
+ self._profiler.export_chrome_trace(tmp_file)
+ with open(tmp_file) as f:
+ content = f.read()
+ with PathManager.open(out_file, "w") as f:
+ f.write(content)
+
+
+class AutogradProfiler(TorchProfiler):
+ """
+ A hook which runs `torch.autograd.profiler.profile`.
+
+ Examples:
+ ::
+ hooks.AutogradProfiler(
+ lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
+ )
+
+ The above example will run the profiler for iteration 10~20 and dump
+ results to ``OUTPUT_DIR``. We did not profile the first few iterations
+ because they are typically slower than the rest.
+ The result files can be loaded in the ``chrome://tracing`` page in chrome browser.
+
+ Note:
+ When used together with NCCL on older version of GPUs,
+ autograd profiler may cause deadlock because it unnecessarily allocates
+ memory on every device it sees. The memory management calls, if
+ interleaved with NCCL calls, lead to deadlock on GPUs that do not
+ support ``cudaLaunchCooperativeKernelMultiDevice``.
+ """
+
+ def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
+ """
+ Args:
+ enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
+ and returns whether to enable the profiler.
+ It will be called once every step, and can be used to select which steps to profile.
+ output_dir (str): the output directory to dump tracing files.
+ use_cuda (bool): same as in `torch.autograd.profiler.profile`.
+ """
+ warnings.warn("AutogradProfiler has been deprecated in favor of TorchProfiler.")
+ self._enable_predicate = enable_predicate
+ self._use_cuda = use_cuda
+ self._output_dir = output_dir
+
+ def before_step(self):
+ if self._enable_predicate(self.trainer):
+ self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
+ self._profiler.__enter__()
+ else:
+ self._profiler = None
+
+
+class EvalHook(HookBase):
+ """
+ Run an evaluation function periodically, and at the end of training.
+
+ It is executed every ``eval_period`` iterations and after the last iteration.
+ """
+
+ def __init__(self, eval_period, eval_function):
+ """
+ Args:
+ eval_period (int): the period to run `eval_function`. Set to 0 to
+ not evaluate periodically (but still after the last iteration).
+ eval_function (callable): a function which takes no arguments, and
+ returns a nested dict of evaluation metrics.
+
+ Note:
+ This hook must be enabled in all or none workers.
+ If you would like only certain workers to perform evaluation,
+ give other workers a no-op function (`eval_function=lambda: None`).
+ """
+ self._period = eval_period
+ self._func = eval_function
+
+ def _do_eval(self):
+ results = self._func()
+
+ if results:
+ assert isinstance(
+ results, dict
+ ), "Eval function must return a dict. Got {} instead.".format(results)
+
+ flattened_results = flatten_results_dict(results)
+ for k, v in flattened_results.items():
+ try:
+ v = float(v)
+ except Exception as e:
+ raise ValueError(
+ "[EvalHook] eval_function should return a nested dict of float. "
+ "Got '{}: {}' instead.".format(k, v)
+ ) from e
+ self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
+
+ # Evaluation may take different time among workers.
+ # A barrier make them start the next iteration together.
+ comm.synchronize()
+
+ def after_step(self):
+ next_iter = self.trainer.iter + 1
+ if self._period > 0 and next_iter % self._period == 0:
+ # do the last eval in after_train
+ if next_iter != self.trainer.max_iter:
+ self._do_eval()
+
+ def after_train(self):
+ # This condition is to prevent the eval from running after a failed training
+ if self.trainer.iter + 1 >= self.trainer.max_iter:
+ self._do_eval()
+ # func is likely a closure that holds reference to the trainer
+ # therefore we clean it to avoid circular reference in the end
+ del self._func
+
+
+class PreciseBN(HookBase):
+ """
+ The standard implementation of BatchNorm uses EMA in inference, which is
+ sometimes suboptimal.
+ This class computes the true average of statistics rather than the moving average,
+ and put true averages to every BN layer in the given model.
+
+ It is executed every ``period`` iterations and after the last iteration.
+ """
+
+ def __init__(self, period, model, data_loader, num_iter):
+ """
+ Args:
+ period (int): the period this hook is run, or 0 to not run during training.
+ The hook will always run in the end of training.
+ model (nn.Module): a module whose all BN layers in training mode will be
+ updated by precise BN.
+ Note that user is responsible for ensuring the BN layers to be
+ updated are in training mode when this hook is triggered.
+ data_loader (iterable): it will produce data to be run by `model(data)`.
+ num_iter (int): number of iterations used to compute the precise
+ statistics.
+ """
+ self._logger = logging.getLogger(__name__)
+ if len(get_bn_modules(model)) == 0:
+ self._logger.info(
+ "PreciseBN is disabled because model does not contain BN layers in training mode."
+ )
+ self._disabled = True
+ return
+
+ self._model = model
+ self._data_loader = data_loader
+ self._num_iter = num_iter
+ self._period = period
+ self._disabled = False
+
+ self._data_iter = None
+
+ def after_step(self):
+ next_iter = self.trainer.iter + 1
+ is_final = next_iter == self.trainer.max_iter
+ if is_final or (self._period > 0 and next_iter % self._period == 0):
+ self.update_stats()
+
+ def update_stats(self):
+ """
+ Update the model with precise statistics. Users can manually call this method.
+ """
+ if self._disabled:
+ return
+
+ if self._data_iter is None:
+ self._data_iter = iter(self._data_loader)
+
+ def data_loader():
+ for num_iter in itertools.count(1):
+ if num_iter % 100 == 0:
+ self._logger.info(
+ "Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
+ )
+ # This way we can reuse the same iterator
+ yield next(self._data_iter)
+
+ with EventStorage(): # capture events in a new storage to discard them
+ self._logger.info(
+ "Running precise-BN for {} iterations... ".format(self._num_iter)
+ + "Note that this could produce different statistics every time."
+ )
+ update_bn_stats(self._model, data_loader(), self._num_iter)
+
+
+class TorchMemoryStats(HookBase):
+ """
+ Writes pytorch's cuda memory statistics periodically.
+ """
+
+ def __init__(self, period=20, max_runs=10):
+ """
+ Args:
+ period (int): Output stats each 'period' iterations
+ max_runs (int): Stop the logging after 'max_runs'
+ """
+
+ self._logger = logging.getLogger(__name__)
+ self._period = period
+ self._max_runs = max_runs
+ self._runs = 0
+
+ def after_step(self):
+ if self._runs > self._max_runs:
+ return
+
+ if (self.trainer.iter + 1) % self._period == 0 or (
+ self.trainer.iter == self.trainer.max_iter - 1
+ ):
+ if torch.cuda.is_available():
+ max_reserved_mb = torch.cuda.max_memory_reserved() / 1024.0 / 1024.0
+ reserved_mb = torch.cuda.memory_reserved() / 1024.0 / 1024.0
+ max_allocated_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
+ allocated_mb = torch.cuda.memory_allocated() / 1024.0 / 1024.0
+
+ self._logger.info(
+ (
+ " iter: {} "
+ " max_reserved_mem: {:.0f}MB "
+ " reserved_mem: {:.0f}MB "
+ " max_allocated_mem: {:.0f}MB "
+ " allocated_mem: {:.0f}MB "
+ ).format(
+ self.trainer.iter,
+ max_reserved_mb,
+ reserved_mb,
+ max_allocated_mb,
+ allocated_mb,
+ )
+ )
+
+ self._runs += 1
+ if self._runs == self._max_runs:
+ mem_summary = torch.cuda.memory_summary()
+ self._logger.info("\n" + mem_summary)
+
+ torch.cuda.reset_peak_memory_stats()
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/launch.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..46f98691f163a82fdfcf75d910b28590af042de9
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/launch.py
@@ -0,0 +1,126 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+from datetime import timedelta
+import torch
+import torch.distributed as dist
+import torch.multiprocessing as mp
+
+from detectron2.utils import comm
+
+__all__ = ["DEFAULT_TIMEOUT", "launch"]
+
+DEFAULT_TIMEOUT = timedelta(minutes=30)
+
+
+def _find_free_port():
+ import socket
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ # Binding to port 0 will cause the OS to find an available port for us
+ sock.bind(("", 0))
+ port = sock.getsockname()[1]
+ sock.close()
+ # NOTE: there is still a chance the port could be taken by other processes.
+ return port
+
+
+def launch(
+ main_func,
+ num_gpus_per_machine,
+ num_machines=1,
+ machine_rank=0,
+ dist_url=None,
+ args=(),
+ timeout=DEFAULT_TIMEOUT,
+):
+ """
+ Launch multi-gpu or distributed training.
+ This function must be called on all machines involved in the training.
+ It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.
+
+ Args:
+ main_func: a function that will be called by `main_func(*args)`
+ num_gpus_per_machine (int): number of GPUs per machine
+ num_machines (int): the total number of machines
+ machine_rank (int): the rank of this machine
+ dist_url (str): url to connect to for distributed jobs, including protocol
+ e.g. "tcp://127.0.0.1:8686".
+ Can be set to "auto" to automatically select a free port on localhost
+ timeout (timedelta): timeout of the distributed workers
+ args (tuple): arguments passed to main_func
+ """
+ world_size = num_machines * num_gpus_per_machine
+ if world_size > 1:
+ # https://github.com/pytorch/pytorch/pull/14391
+ # TODO prctl in spawned processes
+
+ if dist_url == "auto":
+ assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs."
+ port = _find_free_port()
+ dist_url = f"tcp://127.0.0.1:{port}"
+ if num_machines > 1 and dist_url.startswith("file://"):
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
+ )
+
+ mp.spawn(
+ _distributed_worker,
+ nprocs=num_gpus_per_machine,
+ args=(
+ main_func,
+ world_size,
+ num_gpus_per_machine,
+ machine_rank,
+ dist_url,
+ args,
+ timeout,
+ ),
+ daemon=False,
+ )
+ else:
+ main_func(*args)
+
+
+def _distributed_worker(
+ local_rank,
+ main_func,
+ world_size,
+ num_gpus_per_machine,
+ machine_rank,
+ dist_url,
+ args,
+ timeout=DEFAULT_TIMEOUT,
+):
+ assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
+ global_rank = machine_rank * num_gpus_per_machine + local_rank
+ try:
+ dist.init_process_group(
+ backend="NCCL",
+ init_method=dist_url,
+ world_size=world_size,
+ rank=global_rank,
+ timeout=timeout,
+ )
+ except Exception as e:
+ logger = logging.getLogger(__name__)
+ logger.error("Process group URL: {}".format(dist_url))
+ raise e
+
+ # Setup the local process group (which contains ranks within the same machine)
+ assert comm._LOCAL_PROCESS_GROUP is None
+ num_machines = world_size // num_gpus_per_machine
+ for i in range(num_machines):
+ ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
+ pg = dist.new_group(ranks_on_i)
+ if i == machine_rank:
+ comm._LOCAL_PROCESS_GROUP = pg
+
+ assert num_gpus_per_machine <= torch.cuda.device_count()
+ torch.cuda.set_device(local_rank)
+
+ # synchronize is needed here to prevent a possible timeout after calling init_process_group
+ # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
+ comm.synchronize()
+
+ main_func(*args)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/train_loop.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/train_loop.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4a86b52a5604f2b5799abac299ca4726345b7a6
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/engine/train_loop.py
@@ -0,0 +1,417 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import logging
+import numpy as np
+import time
+import weakref
+from typing import List, Mapping, Optional
+import torch
+from torch.nn.parallel import DataParallel, DistributedDataParallel
+
+import detectron2.utils.comm as comm
+from detectron2.utils.events import EventStorage, get_event_storage
+from detectron2.utils.logger import _log_api_usage
+
+__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
+
+
+class HookBase:
+ """
+ Base class for hooks that can be registered with :class:`TrainerBase`.
+
+ Each hook can implement 4 methods. The way they are called is demonstrated
+ in the following snippet:
+ ::
+ hook.before_train()
+ for iter in range(start_iter, max_iter):
+ hook.before_step()
+ trainer.run_step()
+ hook.after_step()
+ iter += 1
+ hook.after_train()
+
+ Notes:
+ 1. In the hook method, users can access ``self.trainer`` to access more
+ properties about the context (e.g., model, current iteration, or config
+ if using :class:`DefaultTrainer`).
+
+ 2. A hook that does something in :meth:`before_step` can often be
+ implemented equivalently in :meth:`after_step`.
+ If the hook takes non-trivial time, it is strongly recommended to
+ implement the hook in :meth:`after_step` instead of :meth:`before_step`.
+ The convention is that :meth:`before_step` should only take negligible time.
+
+ Following this convention will allow hooks that do care about the difference
+ between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
+ function properly.
+
+ """
+
+ trainer: "TrainerBase" = None
+ """
+ A weak reference to the trainer object. Set by the trainer when the hook is registered.
+ """
+
+ def before_train(self):
+ """
+ Called before the first iteration.
+ """
+ pass
+
+ def after_train(self):
+ """
+ Called after the last iteration.
+ """
+ pass
+
+ def before_step(self):
+ """
+ Called before each iteration.
+ """
+ pass
+
+ def after_step(self):
+ """
+ Called after each iteration.
+ """
+ pass
+
+ def state_dict(self):
+ """
+ Hooks are stateless by default, but can be made checkpointable by
+ implementing `state_dict` and `load_state_dict`.
+ """
+ return {}
+
+
+class TrainerBase:
+ """
+ Base class for iterative trainer with hooks.
+
+ The only assumption we made here is: the training runs in a loop.
+ A subclass can implement what the loop is.
+ We made no assumptions about the existence of dataloader, optimizer, model, etc.
+
+ Attributes:
+ iter(int): the current iteration.
+
+ start_iter(int): The iteration to start with.
+ By convention the minimum possible value is 0.
+
+ max_iter(int): The iteration to end training.
+
+ storage(EventStorage): An EventStorage that's opened during the course of training.
+ """
+
+ def __init__(self) -> None:
+ self._hooks: List[HookBase] = []
+ self.iter: int = 0
+ self.start_iter: int = 0
+ self.max_iter: int
+ self.storage: EventStorage
+ _log_api_usage("trainer." + self.__class__.__name__)
+
+ def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
+ """
+ Register hooks to the trainer. The hooks are executed in the order
+ they are registered.
+
+ Args:
+ hooks (list[Optional[HookBase]]): list of hooks
+ """
+ hooks = [h for h in hooks if h is not None]
+ for h in hooks:
+ assert isinstance(h, HookBase)
+ # To avoid circular reference, hooks and trainer cannot own each other.
+ # This normally does not matter, but will cause memory leak if the
+ # involved objects contain __del__:
+ # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
+ h.trainer = weakref.proxy(self)
+ self._hooks.extend(hooks)
+
+ def train(self, start_iter: int, max_iter: int):
+ """
+ Args:
+ start_iter, max_iter (int): See docs above
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Starting training from iteration {}".format(start_iter))
+
+ self.iter = self.start_iter = start_iter
+ self.max_iter = max_iter
+
+ with EventStorage(start_iter) as self.storage:
+ try:
+ self.before_train()
+ for self.iter in range(start_iter, max_iter):
+ self.before_step()
+ self.run_step()
+ self.after_step()
+ # self.iter == max_iter can be used by `after_train` to
+ # tell whether the training successfully finished or failed
+ # due to exceptions.
+ self.iter += 1
+ except Exception:
+ logger.exception("Exception during training:")
+ raise
+ finally:
+ self.after_train()
+
+ def before_train(self):
+ for h in self._hooks:
+ h.before_train()
+
+ def after_train(self):
+ self.storage.iter = self.iter
+ for h in self._hooks:
+ h.after_train()
+
+ def before_step(self):
+ # Maintain the invariant that storage.iter == trainer.iter
+ # for the entire execution of each step
+ self.storage.iter = self.iter
+
+ for h in self._hooks:
+ h.before_step()
+
+ def after_step(self):
+ for h in self._hooks:
+ h.after_step()
+
+ def run_step(self):
+ raise NotImplementedError
+
+ def state_dict(self):
+ ret = {"iteration": self.iter}
+ hooks_state = {}
+ for h in self._hooks:
+ sd = h.state_dict()
+ if sd:
+ name = type(h).__qualname__
+ if name in hooks_state:
+ # TODO handle repetitive stateful hooks
+ continue
+ hooks_state[name] = sd
+ if hooks_state:
+ ret["hooks"] = hooks_state
+ return ret
+
+ def load_state_dict(self, state_dict):
+ logger = logging.getLogger(__name__)
+ self.iter = state_dict["iteration"]
+ for key, value in state_dict.get("hooks", {}).items():
+ for h in self._hooks:
+ try:
+ name = type(h).__qualname__
+ except AttributeError:
+ continue
+ if name == key:
+ h.load_state_dict(value)
+ break
+ else:
+ logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
+
+
+class SimpleTrainer(TrainerBase):
+ """
+ A simple trainer for the most common type of task:
+ single-cost single-optimizer single-data-source iterative optimization,
+ optionally using data-parallelism.
+ It assumes that every step, you:
+
+ 1. Compute the loss with a data from the data_loader.
+ 2. Compute the gradients with the above loss.
+ 3. Update the model with the optimizer.
+
+ All other tasks during training (checkpointing, logging, evaluation, LR schedule)
+ are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
+
+ If you want to do anything fancier than this,
+ either subclass TrainerBase and implement your own `run_step`,
+ or write your own training loop.
+ """
+
+ def __init__(self, model, data_loader, optimizer):
+ """
+ Args:
+ model: a torch Module. Takes a data from data_loader and returns a
+ dict of losses.
+ data_loader: an iterable. Contains data to be used to call model.
+ optimizer: a torch optimizer.
+ """
+ super().__init__()
+
+ """
+ We set the model to training mode in the trainer.
+ However it's valid to train a model that's in eval mode.
+ If you want your model (or a submodule of it) to behave
+ like evaluation during training, you can overwrite its train() method.
+ """
+ model.train()
+
+ self.model = model
+ self.data_loader = data_loader
+ self._data_loader_iter = iter(data_loader)
+ self.optimizer = optimizer
+
+ def run_step(self):
+ """
+ Implement the standard training logic described above.
+ """
+ assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
+ start = time.perf_counter()
+ """
+ If you want to do something with the data, you can wrap the dataloader.
+ """
+ data = next(self._data_loader_iter)
+ data_time = time.perf_counter() - start
+
+ """
+ If you want to do something with the losses, you can wrap the model.
+ """
+ loss_dict = self.model(data)
+ if isinstance(loss_dict, torch.Tensor):
+ losses = loss_dict
+ loss_dict = {"total_loss": loss_dict}
+ else:
+ losses = sum(loss_dict.values())
+
+ """
+ If you need to accumulate gradients or do something similar, you can
+ wrap the optimizer with your custom `zero_grad()` method.
+ """
+ self.optimizer.zero_grad()
+ losses.backward()
+
+ self._write_metrics(loss_dict, data_time)
+
+ """
+ If you need gradient clipping/scaling or other processing, you can
+ wrap the optimizer with your custom `step()` method. But it is
+ suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
+ """
+ self.optimizer.step()
+
+ def _write_metrics(
+ self,
+ loss_dict: Mapping[str, torch.Tensor],
+ data_time: float,
+ prefix: str = "",
+ ) -> None:
+ SimpleTrainer.write_metrics(loss_dict, data_time, prefix)
+
+ @staticmethod
+ def write_metrics(
+ loss_dict: Mapping[str, torch.Tensor],
+ data_time: float,
+ prefix: str = "",
+ ) -> None:
+ """
+ Args:
+ loss_dict (dict): dict of scalar losses
+ data_time (float): time taken by the dataloader iteration
+ prefix (str): prefix for logging keys
+ """
+ metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
+ metrics_dict["data_time"] = data_time
+
+ # Gather metrics among all workers for logging
+ # This assumes we do DDP-style training, which is currently the only
+ # supported method in detectron2.
+ all_metrics_dict = comm.gather(metrics_dict)
+
+ if comm.is_main_process():
+ storage = get_event_storage()
+
+ # data_time among workers can have high variance. The actual latency
+ # caused by data_time is the maximum among workers.
+ data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
+ storage.put_scalar("data_time", data_time)
+
+ # average the rest metrics
+ metrics_dict = {
+ k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
+ }
+ total_losses_reduced = sum(metrics_dict.values())
+ if not np.isfinite(total_losses_reduced):
+ raise FloatingPointError(
+ f"Loss became infinite or NaN at iteration={storage.iter}!\n"
+ f"loss_dict = {metrics_dict}"
+ )
+
+ storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
+ if len(metrics_dict) > 1:
+ storage.put_scalars(**metrics_dict)
+
+ def state_dict(self):
+ ret = super().state_dict()
+ ret["optimizer"] = self.optimizer.state_dict()
+ return ret
+
+ def load_state_dict(self, state_dict):
+ super().load_state_dict(state_dict)
+ self.optimizer.load_state_dict(state_dict["optimizer"])
+
+
+class AMPTrainer(SimpleTrainer):
+ """
+ Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
+ in the training loop.
+ """
+
+ def __init__(self, model, data_loader, optimizer, grad_scaler=None):
+ """
+ Args:
+ model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
+ grad_scaler: torch GradScaler to automatically scale gradients.
+ """
+ unsupported = "AMPTrainer does not support single-process multi-device training!"
+ if isinstance(model, DistributedDataParallel):
+ assert not (model.device_ids and len(model.device_ids) > 1), unsupported
+ assert not isinstance(model, DataParallel), unsupported
+
+ super().__init__(model, data_loader, optimizer)
+
+ if grad_scaler is None:
+ from torch.cuda.amp import GradScaler
+
+ grad_scaler = GradScaler()
+ self.grad_scaler = grad_scaler
+
+ def run_step(self):
+ """
+ Implement the AMP training logic.
+ """
+ assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
+ assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
+ from torch.cuda.amp import autocast
+
+ start = time.perf_counter()
+ data = next(self._data_loader_iter)
+ data_time = time.perf_counter() - start
+
+ with autocast():
+ loss_dict = self.model(data)
+ if isinstance(loss_dict, torch.Tensor):
+ losses = loss_dict
+ loss_dict = {"total_loss": loss_dict}
+ else:
+ losses = sum(loss_dict.values())
+
+ self.optimizer.zero_grad()
+ self.grad_scaler.scale(losses).backward()
+
+ self._write_metrics(loss_dict, data_time)
+
+ self.grad_scaler.step(self.optimizer)
+ self.grad_scaler.update()
+
+ def state_dict(self):
+ ret = super().state_dict()
+ ret["grad_scaler"] = self.grad_scaler.state_dict()
+ return ret
+
+ def load_state_dict(self, state_dict):
+ super().load_state_dict(state_dict)
+ self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d96609e8f2261a6800fe85fcf3e1eaeaa44455c6
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
+from .coco_evaluation import COCOEvaluator
+from .rotated_coco_evaluation import RotatedCOCOEvaluator
+from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
+from .lvis_evaluation import LVISEvaluator
+from .panoptic_evaluation import COCOPanopticEvaluator
+from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
+from .sem_seg_evaluation import SemSegEvaluator
+from .testing import print_csv_format, verify_results
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/cityscapes_evaluation.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/cityscapes_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fb6c4cd5f752d639570d022cb23ce18491c370a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/cityscapes_evaluation.py
@@ -0,0 +1,194 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import glob
+import logging
+import numpy as np
+import os
+import tempfile
+from collections import OrderedDict
+import torch
+from PIL import Image
+
+from detectron2.data import MetadataCatalog
+from detectron2.utils import comm
+from detectron2.utils.file_io import PathManager
+
+from .evaluator import DatasetEvaluator
+
+
+class CityscapesEvaluator(DatasetEvaluator):
+ """
+ Base class for evaluation using cityscapes API.
+ """
+
+ def __init__(self, dataset_name):
+ """
+ Args:
+ dataset_name (str): the name of the dataset.
+ It must have the following metadata associated with it:
+ "thing_classes", "gt_dir".
+ """
+ self._metadata = MetadataCatalog.get(dataset_name)
+ self._cpu_device = torch.device("cpu")
+ self._logger = logging.getLogger(__name__)
+
+ def reset(self):
+ self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
+ self._temp_dir = self._working_dir.name
+ # All workers will write to the same results directory
+ # TODO this does not work in distributed training
+ self._temp_dir = comm.all_gather(self._temp_dir)[0]
+ if self._temp_dir != self._working_dir.name:
+ self._working_dir.cleanup()
+ self._logger.info(
+ "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
+ )
+
+
+class CityscapesInstanceEvaluator(CityscapesEvaluator):
+ """
+ Evaluate instance segmentation results on cityscapes dataset using cityscapes API.
+
+ Note:
+ * It does not work in multi-machine distributed training.
+ * It contains a synchronization, therefore has to be used on all ranks.
+ * Only the main process runs evaluation.
+ """
+
+ def process(self, inputs, outputs):
+ from cityscapesscripts.helpers.labels import name2label
+
+ for input, output in zip(inputs, outputs):
+ file_name = input["file_name"]
+ basename = os.path.splitext(os.path.basename(file_name))[0]
+ pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt")
+
+ if "instances" in output:
+ output = output["instances"].to(self._cpu_device)
+ num_instances = len(output)
+ with open(pred_txt, "w") as fout:
+ for i in range(num_instances):
+ pred_class = output.pred_classes[i]
+ classes = self._metadata.thing_classes[pred_class]
+ class_id = name2label[classes].id
+ score = output.scores[i]
+ mask = output.pred_masks[i].numpy().astype("uint8")
+ png_filename = os.path.join(
+ self._temp_dir, basename + "_{}_{}.png".format(i, classes)
+ )
+
+ Image.fromarray(mask * 255).save(png_filename)
+ fout.write(
+ "{} {} {}\n".format(os.path.basename(png_filename), class_id, score)
+ )
+ else:
+ # Cityscapes requires a prediction file for every ground truth image.
+ with open(pred_txt, "w") as fout:
+ pass
+
+ def evaluate(self):
+ """
+ Returns:
+ dict: has a key "segm", whose value is a dict of "AP" and "AP50".
+ """
+ comm.synchronize()
+ if comm.get_rank() > 0:
+ return
+ import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
+
+ self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
+
+ # set some global states in cityscapes evaluation API, before evaluating
+ cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
+ cityscapes_eval.args.predictionWalk = None
+ cityscapes_eval.args.JSONOutput = False
+ cityscapes_eval.args.colorized = False
+ cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")
+
+ # These lines are adopted from
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
+ gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
+ groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png"))
+ assert len(
+ groundTruthImgList
+ ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
+ cityscapes_eval.args.groundTruthSearch
+ )
+ predictionImgList = []
+ for gt in groundTruthImgList:
+ predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
+ results = cityscapes_eval.evaluateImgLists(
+ predictionImgList, groundTruthImgList, cityscapes_eval.args
+ )["averages"]
+
+ ret = OrderedDict()
+ ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
+ self._working_dir.cleanup()
+ return ret
+
+
+class CityscapesSemSegEvaluator(CityscapesEvaluator):
+ """
+ Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.
+
+ Note:
+ * It does not work in multi-machine distributed training.
+ * It contains a synchronization, therefore has to be used on all ranks.
+ * Only the main process runs evaluation.
+ """
+
+ def process(self, inputs, outputs):
+ from cityscapesscripts.helpers.labels import trainId2label
+
+ for input, output in zip(inputs, outputs):
+ file_name = input["file_name"]
+ basename = os.path.splitext(os.path.basename(file_name))[0]
+ pred_filename = os.path.join(self._temp_dir, basename + "_pred.png")
+
+ output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy()
+ pred = 255 * np.ones(output.shape, dtype=np.uint8)
+ for train_id, label in trainId2label.items():
+ if label.ignoreInEval:
+ continue
+ pred[output == train_id] = label.id
+ Image.fromarray(pred).save(pred_filename)
+
+ def evaluate(self):
+ comm.synchronize()
+ if comm.get_rank() > 0:
+ return
+ # Load the Cityscapes eval script *after* setting the required env var,
+ # since the script reads CITYSCAPES_DATASET into global variables at load time.
+ import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
+
+ self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
+
+ # set some global states in cityscapes evaluation API, before evaluating
+ cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
+ cityscapes_eval.args.predictionWalk = None
+ cityscapes_eval.args.JSONOutput = False
+ cityscapes_eval.args.colorized = False
+
+ # These lines are adopted from
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa
+ gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
+ groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png"))
+ assert len(
+ groundTruthImgList
+ ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
+ cityscapes_eval.args.groundTruthSearch
+ )
+ predictionImgList = []
+ for gt in groundTruthImgList:
+ predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
+ results = cityscapes_eval.evaluateImgLists(
+ predictionImgList, groundTruthImgList, cityscapes_eval.args
+ )
+ ret = OrderedDict()
+ ret["sem_seg"] = {
+ "IoU": 100.0 * results["averageScoreClasses"],
+ "iIoU": 100.0 * results["averageScoreInstClasses"],
+ "IoU_sup": 100.0 * results["averageScoreCategories"],
+ "iIoU_sup": 100.0 * results["averageScoreInstCategories"],
+ }
+ self._working_dir.cleanup()
+ return ret
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/coco_evaluation.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/coco_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..aad7f5a6e79a9047e7eea623ecc761ea9655b8d6
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/coco_evaluation.py
@@ -0,0 +1,710 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import copy
+import io
+import itertools
+import json
+import logging
+import numpy as np
+import os
+import pickle
+from collections import OrderedDict
+import pycocotools.mask as mask_util
+import torch
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+from tabulate import tabulate
+
+import detectron2.utils.comm as comm
+from detectron2.config import CfgNode
+from detectron2.data import MetadataCatalog
+from detectron2.data.datasets.coco import convert_to_coco_json
+from detectron2.evaluation.fast_eval_api import COCOeval_opt
+from detectron2.structures import Boxes, BoxMode, pairwise_iou
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import create_small_table
+
+from .evaluator import DatasetEvaluator
+
+
+class COCOEvaluator(DatasetEvaluator):
+ """
+ Evaluate AR for object proposals, AP for instance detection/segmentation, AP
+ for keypoint detection outputs using COCO's metrics.
+ See http://cocodataset.org/#detection-eval and
+ http://cocodataset.org/#keypoints-eval to understand its metrics.
+ The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
+ the metric cannot be computed (e.g. due to no predictions made).
+
+ In addition to COCO, this evaluator is able to support any bounding box detection,
+ instance segmentation, or keypoint detection dataset.
+ """
+
+ def __init__(
+ self,
+ dataset_name,
+ tasks=None,
+ distributed=True,
+ output_dir=None,
+ *,
+ max_dets_per_image=None,
+ use_fast_impl=True,
+ kpt_oks_sigmas=(),
+ ):
+ """
+ Args:
+ dataset_name (str): name of the dataset to be evaluated.
+ It must have either the following corresponding metadata:
+
+ "json_file": the path to the COCO format annotation
+
+ Or it must be in detectron2's standard dataset format
+ so it can be converted to COCO format automatically.
+ tasks (tuple[str]): tasks that can be evaluated under the given
+ configuration. A task is one of "bbox", "segm", "keypoints".
+ By default, will infer this automatically from predictions.
+ distributed (True): if True, will collect results from all ranks and run evaluation
+ in the main process.
+ Otherwise, will only evaluate the results in the current process.
+ output_dir (str): optional, an output directory to dump all
+ results predicted on the dataset. The dump contains two files:
+
+ 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
+ contains all the results in the format they are produced by the model.
+ 2. "coco_instances_results.json" a json file in COCO's result format.
+ max_dets_per_image (int): limit on the maximum number of detections per image.
+ By default in COCO, this limit is to 100, but this can be customized
+ to be greater, as is needed in evaluation metrics AP fixed and AP pool
+ (see https://arxiv.org/pdf/2102.01066.pdf)
+ This doesn't affect keypoint evaluation.
+ use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
+ Although the results should be very close to the official implementation in COCO
+ API, it is still recommended to compute results with the official API for use in
+ papers. The faster implementation also uses more RAM.
+ kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
+ See http://cocodataset.org/#keypoints-eval
+ When empty, it will use the defaults in COCO.
+ Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
+ """
+ self._logger = logging.getLogger(__name__)
+ self._distributed = distributed
+ self._output_dir = output_dir
+ self._use_fast_impl = use_fast_impl
+
+ # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
+ # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
+ # 3rd element (100) is used as the limit on the number of detections per image when
+ # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
+ # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
+ if max_dets_per_image is None:
+ max_dets_per_image = [1, 10, 100]
+ else:
+ max_dets_per_image = [1, 10, max_dets_per_image]
+ self._max_dets_per_image = max_dets_per_image
+
+ if tasks is not None and isinstance(tasks, CfgNode):
+ kpt_oks_sigmas = (
+ tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
+ )
+ self._logger.warn(
+ "COCO Evaluator instantiated using config, this is deprecated behavior."
+ " Please pass in explicit arguments instead."
+ )
+ self._tasks = None # Infering it from predictions should be better
+ else:
+ self._tasks = tasks
+
+ self._cpu_device = torch.device("cpu")
+
+ self._metadata = MetadataCatalog.get(dataset_name)
+ if not hasattr(self._metadata, "json_file"):
+ if output_dir is None:
+ raise ValueError(
+ "output_dir must be provided to COCOEvaluator "
+ "for datasets not in COCO format."
+ )
+ self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")
+
+ cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
+ self._metadata.json_file = cache_path
+ convert_to_coco_json(dataset_name, cache_path)
+
+ json_file = PathManager.get_local_path(self._metadata.json_file)
+ with contextlib.redirect_stdout(io.StringIO()):
+ self._coco_api = COCO(json_file)
+
+ # Test set json files do not contain annotations (evaluation must be
+ # performed using the COCO evaluation server).
+ self._do_evaluation = "annotations" in self._coco_api.dataset
+ if self._do_evaluation:
+ self._kpt_oks_sigmas = kpt_oks_sigmas
+
+ def reset(self):
+ self._predictions = []
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
+ It is a list of dict. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name", "image_id".
+ outputs: the outputs of a COCO model. It is a list of dicts with key
+ "instances" that contains :class:`Instances`.
+ """
+ for input, output in zip(inputs, outputs):
+ prediction = {"image_id": input["image_id"]}
+
+ if "instances" in output:
+ instances = output["instances"].to(self._cpu_device)
+ prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
+ if "proposals" in output:
+ prediction["proposals"] = output["proposals"].to(self._cpu_device)
+ if len(prediction) > 1:
+ self._predictions.append(prediction)
+
+ def evaluate(self, img_ids=None):
+ """
+ Args:
+ img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
+ """
+ if self._distributed:
+ comm.synchronize()
+ predictions = comm.gather(self._predictions, dst=0)
+ predictions = list(itertools.chain(*predictions))
+
+ if not comm.is_main_process():
+ return {}
+ else:
+ predictions = self._predictions
+
+ if len(predictions) == 0:
+ self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
+ return {}
+
+ if self._output_dir:
+ PathManager.mkdirs(self._output_dir)
+ file_path = os.path.join(self._output_dir, "instances_predictions.pth")
+ with PathManager.open(file_path, "wb") as f:
+ torch.save(predictions, f)
+
+ self._results = OrderedDict()
+ if "proposals" in predictions[0]:
+ self._eval_box_proposals(predictions)
+ if "instances" in predictions[0]:
+ self._eval_predictions(predictions, img_ids=img_ids)
+ # Copy so the caller can do whatever with results
+ return copy.deepcopy(self._results)
+
+ def _tasks_from_predictions(self, predictions):
+ """
+ Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
+ """
+ tasks = {"bbox"}
+ for pred in predictions:
+ if "segmentation" in pred:
+ tasks.add("segm")
+ if "keypoints" in pred:
+ tasks.add("keypoints")
+ return sorted(tasks)
+
+ def _eval_predictions(self, predictions, img_ids=None):
+ """
+ Evaluate predictions. Fill self._results with the metrics of the tasks.
+ """
+ self._logger.info("Preparing results for COCO format ...")
+ coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
+ tasks = self._tasks or self._tasks_from_predictions(coco_results)
+
+ # unmap the category ids for COCO
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
+ dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
+ all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
+ num_classes = len(all_contiguous_ids)
+ assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
+
+ reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
+ for result in coco_results:
+ category_id = result["category_id"]
+ assert category_id < num_classes, (
+ f"A prediction has class={category_id}, "
+ f"but the dataset only has {num_classes} classes and "
+ f"predicted class id should be in [0, {num_classes - 1}]."
+ )
+ result["category_id"] = reverse_id_mapping[category_id]
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "coco_instances_results.json")
+ self._logger.info("Saving results to {}".format(file_path))
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(coco_results))
+ f.flush()
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info(
+ "Evaluating predictions with {} COCO API...".format(
+ "unofficial" if self._use_fast_impl else "official"
+ )
+ )
+ for task in sorted(tasks):
+ assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
+ coco_eval = (
+ _evaluate_predictions_on_coco(
+ self._coco_api,
+ coco_results,
+ task,
+ kpt_oks_sigmas=self._kpt_oks_sigmas,
+ use_fast_impl=self._use_fast_impl,
+ img_ids=img_ids,
+ max_dets_per_image=self._max_dets_per_image,
+ )
+ if len(coco_results) > 0
+ else None # cocoapi does not handle empty results very well
+ )
+
+ res = self._derive_coco_results(
+ coco_eval, task, class_names=self._metadata.get("thing_classes")
+ )
+ self._results[task] = res
+
+ def _eval_box_proposals(self, predictions):
+ """
+ Evaluate the box proposals in predictions.
+ Fill self._results with the metrics for "box_proposals" task.
+ """
+ if self._output_dir:
+ # Saving generated box proposals to file.
+ # Predicted box_proposals are in XYXY_ABS mode.
+ bbox_mode = BoxMode.XYXY_ABS.value
+ ids, boxes, objectness_logits = [], [], []
+ for prediction in predictions:
+ ids.append(prediction["image_id"])
+ boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
+ objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
+
+ proposal_data = {
+ "boxes": boxes,
+ "objectness_logits": objectness_logits,
+ "ids": ids,
+ "bbox_mode": bbox_mode,
+ }
+ with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
+ pickle.dump(proposal_data, f)
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info("Evaluating bbox proposals ...")
+ res = {}
+ areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
+ for limit in [100, 1000]:
+ for area, suffix in areas.items():
+ stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
+ key = "AR{}@{:d}".format(suffix, limit)
+ res[key] = float(stats["ar"].item() * 100)
+ self._logger.info("Proposal metrics: \n" + create_small_table(res))
+ self._results["box_proposals"] = res
+
+ def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
+ """
+ Derive the desired score numbers from summarized COCOeval.
+
+ Args:
+ coco_eval (None or COCOEval): None represents no predictions from model.
+ iou_type (str):
+ class_names (None or list[str]): if provided, will use it to predict
+ per-category AP.
+
+ Returns:
+ a dict of {metric name: score}
+ """
+
+ metrics = {
+ "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
+ "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
+ "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
+ }[iou_type]
+
+ if coco_eval is None:
+ self._logger.warn("No predictions from the model!")
+ return {metric: float("nan") for metric in metrics}
+
+ # the standard metrics
+ results = {
+ metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
+ for idx, metric in enumerate(metrics)
+ }
+ self._logger.info(
+ "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
+ )
+ if not np.isfinite(sum(results.values())):
+ self._logger.info("Some metrics cannot be computed and is shown as NaN.")
+
+ if class_names is None or len(class_names) <= 1:
+ return results
+ # Compute per-category AP
+ # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
+ precisions = coco_eval.eval["precision"]
+ # precision has dims (iou, recall, cls, area range, max dets)
+ assert len(class_names) == precisions.shape[2]
+
+ results_per_category = []
+ for idx, name in enumerate(class_names):
+ # area range index 0: all area ranges
+ # max dets index -1: typically 100 per image
+ precision = precisions[:, :, idx, 0, -1]
+ precision = precision[precision > -1]
+ ap = np.mean(precision) if precision.size else float("nan")
+ results_per_category.append(("{}".format(name), float(ap * 100)))
+
+ # tabulate it
+ N_COLS = min(6, len(results_per_category) * 2)
+ results_flatten = list(itertools.chain(*results_per_category))
+ results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
+ table = tabulate(
+ results_2d,
+ tablefmt="pipe",
+ floatfmt=".3f",
+ headers=["category", "AP"] * (N_COLS // 2),
+ numalign="left",
+ )
+ self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
+
+ results.update({"AP-" + name: ap for name, ap in results_per_category})
+ return results
+
+
+def instances_to_coco_json(instances, img_id):
+ """
+ Dump an "Instances" object to a COCO-format json that's used for evaluation.
+
+ Args:
+ instances (Instances):
+ img_id (int): the image id
+
+ Returns:
+ list[dict]: list of json annotations in COCO format.
+ """
+ num_instance = len(instances)
+ if num_instance == 0:
+ return []
+
+ boxes = instances.pred_boxes.tensor.numpy()
+ boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ boxes = boxes.tolist()
+ scores = instances.scores.tolist()
+ classes = instances.pred_classes.tolist()
+
+ has_mask = instances.has("pred_masks")
+ if has_mask:
+ # use RLE to encode the masks, because they are too large and takes memory
+ # since this evaluator stores outputs of the entire dataset
+ rles = [
+ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
+ for mask in instances.pred_masks
+ ]
+ for rle in rles:
+ # "counts" is an array encoded by mask_util as a byte-stream. Python3's
+ # json writer which always produces strings cannot serialize a bytestream
+ # unless you decode it. Thankfully, utf-8 works out (which is also what
+ # the pycocotools/_mask.pyx does).
+ rle["counts"] = rle["counts"].decode("utf-8")
+
+ has_keypoints = instances.has("pred_keypoints")
+ if has_keypoints:
+ keypoints = instances.pred_keypoints
+
+ results = []
+ for k in range(num_instance):
+ result = {
+ "image_id": img_id,
+ "category_id": classes[k],
+ "bbox": boxes[k],
+ "score": scores[k],
+ }
+ if has_mask:
+ result["segmentation"] = rles[k]
+ if has_keypoints:
+ # In COCO annotations,
+ # keypoints coordinates are pixel indices.
+ # However our predictions are floating point coordinates.
+ # Therefore we subtract 0.5 to be consistent with the annotation format.
+ # This is the inverse of data loading logic in `datasets/coco.py`.
+ keypoints[k][:, :2] -= 0.5
+ result["keypoints"] = keypoints[k].flatten().tolist()
+ results.append(result)
+ return results
+
+
+# inspired from Detectron:
+# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
+def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
+ """
+ Evaluate detection proposal recall metrics. This function is a much
+ faster alternative to the official COCO API recall evaluation code. However,
+ it produces slightly different results.
+ """
+ # Record max overlap value for each gt box
+ # Return vector of overlap values
+ areas = {
+ "all": 0,
+ "small": 1,
+ "medium": 2,
+ "large": 3,
+ "96-128": 4,
+ "128-256": 5,
+ "256-512": 6,
+ "512-inf": 7,
+ }
+ area_ranges = [
+ [0 ** 2, 1e5 ** 2], # all
+ [0 ** 2, 32 ** 2], # small
+ [32 ** 2, 96 ** 2], # medium
+ [96 ** 2, 1e5 ** 2], # large
+ [96 ** 2, 128 ** 2], # 96-128
+ [128 ** 2, 256 ** 2], # 128-256
+ [256 ** 2, 512 ** 2], # 256-512
+ [512 ** 2, 1e5 ** 2],
+ ] # 512-inf
+ assert area in areas, "Unknown area range: {}".format(area)
+ area_range = area_ranges[areas[area]]
+ gt_overlaps = []
+ num_pos = 0
+
+ for prediction_dict in dataset_predictions:
+ predictions = prediction_dict["proposals"]
+
+ # sort predictions in descending order
+ # TODO maybe remove this and make it explicit in the documentation
+ inds = predictions.objectness_logits.sort(descending=True)[1]
+ predictions = predictions[inds]
+
+ ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
+ anno = coco_api.loadAnns(ann_ids)
+ gt_boxes = [
+ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
+ for obj in anno
+ if obj["iscrowd"] == 0
+ ]
+ gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
+ gt_boxes = Boxes(gt_boxes)
+ gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
+
+ if len(gt_boxes) == 0 or len(predictions) == 0:
+ continue
+
+ valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
+ gt_boxes = gt_boxes[valid_gt_inds]
+
+ num_pos += len(gt_boxes)
+
+ if len(gt_boxes) == 0:
+ continue
+
+ if limit is not None and len(predictions) > limit:
+ predictions = predictions[:limit]
+
+ overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
+
+ _gt_overlaps = torch.zeros(len(gt_boxes))
+ for j in range(min(len(predictions), len(gt_boxes))):
+ # find which proposal box maximally covers each gt box
+ # and get the iou amount of coverage for each gt box
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+
+ # find which gt box is 'best' covered (i.e. 'best' = most iou)
+ gt_ovr, gt_ind = max_overlaps.max(dim=0)
+ assert gt_ovr >= 0
+ # find the proposal box that covers the best covered gt box
+ box_ind = argmax_overlaps[gt_ind]
+ # record the iou coverage of this gt box
+ _gt_overlaps[j] = overlaps[box_ind, gt_ind]
+ assert _gt_overlaps[j] == gt_ovr
+ # mark the proposal box and the gt box as used
+ overlaps[box_ind, :] = -1
+ overlaps[:, gt_ind] = -1
+
+ # append recorded iou coverage level
+ gt_overlaps.append(_gt_overlaps)
+ gt_overlaps = (
+ torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
+ )
+ gt_overlaps, _ = torch.sort(gt_overlaps)
+
+ if thresholds is None:
+ step = 0.05
+ thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
+ recalls = torch.zeros_like(thresholds)
+ # compute recall for each iou threshold
+ for i, t in enumerate(thresholds):
+ recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
+ # ar = 2 * np.trapz(recalls, thresholds)
+ ar = recalls.mean()
+ return {
+ "ar": ar,
+ "recalls": recalls,
+ "thresholds": thresholds,
+ "gt_overlaps": gt_overlaps,
+ "num_pos": num_pos,
+ }
+
+
+def _evaluate_predictions_on_coco(
+ coco_gt,
+ coco_results,
+ iou_type,
+ kpt_oks_sigmas=None,
+ use_fast_impl=True,
+ img_ids=None,
+ max_dets_per_image=None,
+):
+ """
+ Evaluate the coco results using COCOEval API.
+ """
+ assert len(coco_results) > 0
+
+ if iou_type == "segm":
+ coco_results = copy.deepcopy(coco_results)
+ # When evaluating mask AP, if the results contain bbox, cocoapi will
+ # use the box area as the area of the instance, instead of the mask area.
+ # This leads to a different definition of small/medium/large.
+ # We remove the bbox field to let mask AP use mask area.
+ for c in coco_results:
+ c.pop("bbox", None)
+
+ coco_dt = coco_gt.loadRes(coco_results)
+ coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
+ # For COCO, the default max_dets_per_image is [1, 10, 100].
+ if max_dets_per_image is None:
+ max_dets_per_image = [1, 10, 100] # Default from COCOEval
+ else:
+ assert (
+ len(max_dets_per_image) >= 3
+ ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3"
+ # In the case that user supplies a custom input for max_dets_per_image,
+ # apply COCOevalMaxDets to evaluate AP with the custom input.
+ if max_dets_per_image[2] != 100:
+ coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type)
+ if iou_type != "keypoints":
+ coco_eval.params.maxDets = max_dets_per_image
+
+ if img_ids is not None:
+ coco_eval.params.imgIds = img_ids
+
+ if iou_type == "keypoints":
+ # Use the COCO default keypoint OKS sigmas unless overrides are specified
+ if kpt_oks_sigmas:
+ assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
+ coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
+ # COCOAPI requires every detection and every gt to have keypoints, so
+ # we just take the first entry from both
+ num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
+ num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
+ num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
+ assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
+ f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
+ f"Ground truth contains {num_keypoints_gt} keypoints. "
+ f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
+ "They have to agree with each other. For meaning of OKS, please refer to "
+ "http://cocodataset.org/#keypoints-eval."
+ )
+
+ coco_eval.evaluate()
+ coco_eval.accumulate()
+ coco_eval.summarize()
+
+ return coco_eval
+
+
+class COCOevalMaxDets(COCOeval):
+ """
+ Modified version of COCOeval for evaluating AP with a custom
+ maxDets (by default for COCO, maxDets is 100)
+ """
+
+ def summarize(self):
+ """
+ Compute and display summary metrics for evaluation results given
+ a custom value for max_dets_per_image
+ """
+
+ def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
+ p = self.params
+ iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
+ titleStr = "Average Precision" if ap == 1 else "Average Recall"
+ typeStr = "(AP)" if ap == 1 else "(AR)"
+ iouStr = (
+ "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
+ if iouThr is None
+ else "{:0.2f}".format(iouThr)
+ )
+
+ aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
+ mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
+ if ap == 1:
+ # dimension of precision: [TxRxKxAxM]
+ s = self.eval["precision"]
+ # IoU
+ if iouThr is not None:
+ t = np.where(iouThr == p.iouThrs)[0]
+ s = s[t]
+ s = s[:, :, :, aind, mind]
+ else:
+ # dimension of recall: [TxKxAxM]
+ s = self.eval["recall"]
+ if iouThr is not None:
+ t = np.where(iouThr == p.iouThrs)[0]
+ s = s[t]
+ s = s[:, :, aind, mind]
+ if len(s[s > -1]) == 0:
+ mean_s = -1
+ else:
+ mean_s = np.mean(s[s > -1])
+ print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
+ return mean_s
+
+ def _summarizeDets():
+ stats = np.zeros((12,))
+ # Evaluate AP using the custom limit on maximum detections per image
+ stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
+ stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
+ stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
+ stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
+ stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
+ stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
+ stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
+ stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
+ stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
+ stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
+ stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
+ stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
+ return stats
+
+ def _summarizeKps():
+ stats = np.zeros((10,))
+ stats[0] = _summarize(1, maxDets=20)
+ stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
+ stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
+ stats[3] = _summarize(1, maxDets=20, areaRng="medium")
+ stats[4] = _summarize(1, maxDets=20, areaRng="large")
+ stats[5] = _summarize(0, maxDets=20)
+ stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
+ stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
+ stats[8] = _summarize(0, maxDets=20, areaRng="medium")
+ stats[9] = _summarize(0, maxDets=20, areaRng="large")
+ return stats
+
+ if not self.eval:
+ raise Exception("Please run accumulate() first")
+ iouType = self.params.iouType
+ if iouType == "segm" or iouType == "bbox":
+ summarize = _summarizeDets
+ elif iouType == "keypoints":
+ summarize = _summarizeKps
+ self.stats = summarize()
+
+ def __str__(self):
+ self.summarize()
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..baf996002b2fddc8c1952408d450b5bf69394f0a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py
@@ -0,0 +1,224 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import datetime
+import logging
+import time
+from collections import OrderedDict, abc
+from contextlib import ExitStack, contextmanager
+from typing import List, Union
+import torch
+from torch import nn
+
+from detectron2.utils.comm import get_world_size, is_main_process
+from detectron2.utils.logger import log_every_n_seconds
+
+
+class DatasetEvaluator:
+ """
+ Base class for a dataset evaluator.
+
+ The function :func:`inference_on_dataset` runs the model over
+ all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.
+
+ This class will accumulate information of the inputs/outputs (by :meth:`process`),
+ and produce evaluation results in the end (by :meth:`evaluate`).
+ """
+
+ def reset(self):
+ """
+ Preparation for a new round of evaluation.
+ Should be called before starting a round of evaluation.
+ """
+ pass
+
+ def process(self, inputs, outputs):
+ """
+ Process the pair of inputs and outputs.
+ If they contain batches, the pairs can be consumed one-by-one using `zip`:
+
+ .. code-block:: python
+
+ for input_, output in zip(inputs, outputs):
+ # do evaluation on single input/output pair
+ ...
+
+ Args:
+ inputs (list): the inputs that's used to call the model.
+ outputs (list): the return value of `model(inputs)`
+ """
+ pass
+
+ def evaluate(self):
+ """
+ Evaluate/summarize the performance, after processing all input/output pairs.
+
+ Returns:
+ dict:
+ A new evaluator class can return a dict of arbitrary format
+ as long as the user can process the results.
+ In our train_net.py, we expect the following format:
+
+ * key: the name of the task (e.g., bbox)
+ * value: a dict of {metric name: score}, e.g.: {"AP50": 80}
+ """
+ pass
+
+
+class DatasetEvaluators(DatasetEvaluator):
+ """
+ Wrapper class to combine multiple :class:`DatasetEvaluator` instances.
+
+ This class dispatches every evaluation call to
+ all of its :class:`DatasetEvaluator`.
+ """
+
+ def __init__(self, evaluators):
+ """
+ Args:
+ evaluators (list): the evaluators to combine.
+ """
+ super().__init__()
+ self._evaluators = evaluators
+
+ def reset(self):
+ for evaluator in self._evaluators:
+ evaluator.reset()
+
+ def process(self, inputs, outputs):
+ for evaluator in self._evaluators:
+ evaluator.process(inputs, outputs)
+
+ def evaluate(self):
+ results = OrderedDict()
+ for evaluator in self._evaluators:
+ result = evaluator.evaluate()
+ if is_main_process() and result is not None:
+ for k, v in result.items():
+ assert (
+ k not in results
+ ), "Different evaluators produce results with the same key {}".format(k)
+ results[k] = v
+ return results
+
+
+def inference_on_dataset(
+ model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None]
+):
+ """
+ Run model on the data_loader and evaluate the metrics with evaluator.
+ Also benchmark the inference speed of `model.__call__` accurately.
+ The model will be used in eval mode.
+
+ Args:
+ model (callable): a callable which takes an object from
+ `data_loader` and returns some outputs.
+
+ If it's an nn.Module, it will be temporarily set to `eval` mode.
+ If you wish to evaluate a model in `training` mode instead, you can
+ wrap the given model and override its behavior of `.eval()` and `.train()`.
+ data_loader: an iterable object with a length.
+ The elements it generates will be the inputs to the model.
+ evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark,
+ but don't want to do any evaluation.
+
+ Returns:
+ The return value of `evaluator.evaluate()`
+ """
+ num_devices = get_world_size()
+ logger = logging.getLogger(__name__)
+ logger.info("Start inference on {} batches".format(len(data_loader)))
+
+ total = len(data_loader) # inference data loader must have a fixed length
+ if evaluator is None:
+ # create a no-op evaluator
+ evaluator = DatasetEvaluators([])
+ if isinstance(evaluator, abc.MutableSequence):
+ evaluator = DatasetEvaluators(evaluator)
+ evaluator.reset()
+
+ num_warmup = min(5, total - 1)
+ start_time = time.perf_counter()
+ total_data_time = 0
+ total_compute_time = 0
+ total_eval_time = 0
+ with ExitStack() as stack:
+ if isinstance(model, nn.Module):
+ stack.enter_context(inference_context(model))
+ stack.enter_context(torch.no_grad())
+
+ start_data_time = time.perf_counter()
+ for idx, inputs in enumerate(data_loader):
+ total_data_time += time.perf_counter() - start_data_time
+ if idx == num_warmup:
+ start_time = time.perf_counter()
+ total_data_time = 0
+ total_compute_time = 0
+ total_eval_time = 0
+
+ start_compute_time = time.perf_counter()
+ outputs = model(inputs)
+ if torch.cuda.is_available():
+ torch.cuda.synchronize()
+ total_compute_time += time.perf_counter() - start_compute_time
+
+ start_eval_time = time.perf_counter()
+ evaluator.process(inputs, outputs)
+ total_eval_time += time.perf_counter() - start_eval_time
+
+ iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
+ data_seconds_per_iter = total_data_time / iters_after_start
+ compute_seconds_per_iter = total_compute_time / iters_after_start
+ eval_seconds_per_iter = total_eval_time / iters_after_start
+ total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start
+ if idx >= num_warmup * 2 or compute_seconds_per_iter > 5:
+ eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1)))
+ log_every_n_seconds(
+ logging.INFO,
+ (
+ f"Inference done {idx + 1}/{total}. "
+ f"Dataloading: {data_seconds_per_iter:.4f} s/iter. "
+ f"Inference: {compute_seconds_per_iter:.4f} s/iter. "
+ f"Eval: {eval_seconds_per_iter:.4f} s/iter. "
+ f"Total: {total_seconds_per_iter:.4f} s/iter. "
+ f"ETA={eta}"
+ ),
+ n=5,
+ )
+ start_data_time = time.perf_counter()
+
+ # Measure the time only for this worker (before the synchronization barrier)
+ total_time = time.perf_counter() - start_time
+ total_time_str = str(datetime.timedelta(seconds=total_time))
+ # NOTE this format is parsed by grep
+ logger.info(
+ "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format(
+ total_time_str, total_time / (total - num_warmup), num_devices
+ )
+ )
+ total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
+ logger.info(
+ "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format(
+ total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
+ )
+ )
+
+ results = evaluator.evaluate()
+ # An evaluator may return None when not in main process.
+ # Replace it by an empty dict instead to make it easier for downstream code to handle
+ if results is None:
+ results = {}
+ return results
+
+
+@contextmanager
+def inference_context(model):
+ """
+ A context where the model is temporarily changed to eval mode,
+ and restored to previous mode afterwards.
+
+ Args:
+ model: a torch Module
+ """
+ training_mode = model.training
+ model.eval()
+ yield
+ model.train(training_mode)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/fast_eval_api.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/fast_eval_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..2eb202bd5efa3ec3d366027b1debffc269ae8b17
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/fast_eval_api.py
@@ -0,0 +1,121 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import numpy as np
+import time
+from pycocotools.cocoeval import COCOeval
+
+from detectron2 import _C
+
+logger = logging.getLogger(__name__)
+
+
+class COCOeval_opt(COCOeval):
+ """
+ This is a slightly modified version of the original COCO API, where the functions evaluateImg()
+ and accumulate() are implemented in C++ to speedup evaluation
+ """
+
+ def evaluate(self):
+ """
+ Run per image evaluation on given images and store results in self.evalImgs_cpp, a
+ datastructure that isn't readable from Python but is used by a c++ implementation of
+ accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
+ self.evalImgs because this datastructure is a computational bottleneck.
+ :return: None
+ """
+ tic = time.time()
+
+ p = self.params
+ # add backward compatibility if useSegm is specified in params
+ if p.useSegm is not None:
+ p.iouType = "segm" if p.useSegm == 1 else "bbox"
+ logger.info("Evaluate annotation type *{}*".format(p.iouType))
+ p.imgIds = list(np.unique(p.imgIds))
+ if p.useCats:
+ p.catIds = list(np.unique(p.catIds))
+ p.maxDets = sorted(p.maxDets)
+ self.params = p
+
+ self._prepare() # bottleneck
+
+ # loop through images, area range, max detection number
+ catIds = p.catIds if p.useCats else [-1]
+
+ if p.iouType == "segm" or p.iouType == "bbox":
+ computeIoU = self.computeIoU
+ elif p.iouType == "keypoints":
+ computeIoU = self.computeOks
+ self.ious = {
+ (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
+ } # bottleneck
+
+ maxDet = p.maxDets[-1]
+
+ # <<<< Beginning of code differences with original COCO API
+ def convert_instances_to_cpp(instances, is_det=False):
+ # Convert annotations for a list of instances in an image to a format that's fast
+ # to access in C++
+ instances_cpp = []
+ for instance in instances:
+ instance_cpp = _C.InstanceAnnotation(
+ int(instance["id"]),
+ instance["score"] if is_det else instance.get("score", 0.0),
+ instance["area"],
+ bool(instance.get("iscrowd", 0)),
+ bool(instance.get("ignore", 0)),
+ )
+ instances_cpp.append(instance_cpp)
+ return instances_cpp
+
+ # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
+ ground_truth_instances = [
+ [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
+ for imgId in p.imgIds
+ ]
+ detected_instances = [
+ [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]
+ for imgId in p.imgIds
+ ]
+ ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
+
+ if not p.useCats:
+ # For each image, flatten per-category lists into a single list
+ ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]
+ detected_instances = [[[o for c in i for o in c]] for i in detected_instances]
+
+ # Call C++ implementation of self.evaluateImgs()
+ self._evalImgs_cpp = _C.COCOevalEvaluateImages(
+ p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances
+ )
+ self._evalImgs = None
+
+ self._paramsEval = copy.deepcopy(self.params)
+ toc = time.time()
+ logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
+ # >>>> End of code differences with original COCO API
+
+ def accumulate(self):
+ """
+ Accumulate per image evaluation results and store the result in self.eval. Does not
+ support changing parameter settings from those used by self.evaluate()
+ """
+ logger.info("Accumulating evaluation results...")
+ tic = time.time()
+ assert hasattr(
+ self, "_evalImgs_cpp"
+ ), "evaluate() must be called before accmulate() is called."
+
+ self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
+
+ # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
+ self.eval["recall"] = np.array(self.eval["recall"]).reshape(
+ self.eval["counts"][:1] + self.eval["counts"][2:]
+ )
+
+ # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
+ # num_area_ranges X num_max_detections
+ self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
+ self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
+ toc = time.time()
+ logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/lvis_evaluation.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/lvis_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..0604feaaf42ffd072e3cb91f395204f818fa709a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/lvis_evaluation.py
@@ -0,0 +1,380 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import itertools
+import json
+import logging
+import os
+import pickle
+from collections import OrderedDict
+import torch
+
+import detectron2.utils.comm as comm
+from detectron2.config import CfgNode
+from detectron2.data import MetadataCatalog
+from detectron2.structures import Boxes, BoxMode, pairwise_iou
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import create_small_table
+
+from .coco_evaluation import instances_to_coco_json
+from .evaluator import DatasetEvaluator
+
+
+class LVISEvaluator(DatasetEvaluator):
+ """
+ Evaluate object proposal and instance detection/segmentation outputs using
+ LVIS's metrics and evaluation API.
+ """
+
+ def __init__(
+ self,
+ dataset_name,
+ tasks=None,
+ distributed=True,
+ output_dir=None,
+ *,
+ max_dets_per_image=None,
+ ):
+ """
+ Args:
+ dataset_name (str): name of the dataset to be evaluated.
+ It must have the following corresponding metadata:
+ "json_file": the path to the LVIS format annotation
+ tasks (tuple[str]): tasks that can be evaluated under the given
+ configuration. A task is one of "bbox", "segm".
+ By default, will infer this automatically from predictions.
+ distributed (True): if True, will collect results from all ranks for evaluation.
+ Otherwise, will evaluate the results in the current process.
+ output_dir (str): optional, an output directory to dump results.
+ max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
+ This limit, by default of the LVIS dataset, is 300.
+ """
+ from lvis import LVIS
+
+ self._logger = logging.getLogger(__name__)
+
+ if tasks is not None and isinstance(tasks, CfgNode):
+ self._logger.warn(
+ "COCO Evaluator instantiated using config, this is deprecated behavior."
+ " Please pass in explicit arguments instead."
+ )
+ self._tasks = None # Infering it from predictions should be better
+ else:
+ self._tasks = tasks
+
+ self._distributed = distributed
+ self._output_dir = output_dir
+ self._max_dets_per_image = max_dets_per_image
+
+ self._cpu_device = torch.device("cpu")
+
+ self._metadata = MetadataCatalog.get(dataset_name)
+ json_file = PathManager.get_local_path(self._metadata.json_file)
+ self._lvis_api = LVIS(json_file)
+ # Test set json files do not contain annotations (evaluation must be
+ # performed using the LVIS evaluation server).
+ self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
+
+ def reset(self):
+ self._predictions = []
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN).
+ It is a list of dict. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name", "image_id".
+ outputs: the outputs of a LVIS model. It is a list of dicts with key
+ "instances" that contains :class:`Instances`.
+ """
+ for input, output in zip(inputs, outputs):
+ prediction = {"image_id": input["image_id"]}
+
+ if "instances" in output:
+ instances = output["instances"].to(self._cpu_device)
+ prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
+ if "proposals" in output:
+ prediction["proposals"] = output["proposals"].to(self._cpu_device)
+ self._predictions.append(prediction)
+
+ def evaluate(self):
+ if self._distributed:
+ comm.synchronize()
+ predictions = comm.gather(self._predictions, dst=0)
+ predictions = list(itertools.chain(*predictions))
+
+ if not comm.is_main_process():
+ return
+ else:
+ predictions = self._predictions
+
+ if len(predictions) == 0:
+ self._logger.warning("[LVISEvaluator] Did not receive valid predictions.")
+ return {}
+
+ if self._output_dir:
+ PathManager.mkdirs(self._output_dir)
+ file_path = os.path.join(self._output_dir, "instances_predictions.pth")
+ with PathManager.open(file_path, "wb") as f:
+ torch.save(predictions, f)
+
+ self._results = OrderedDict()
+ if "proposals" in predictions[0]:
+ self._eval_box_proposals(predictions)
+ if "instances" in predictions[0]:
+ self._eval_predictions(predictions)
+ # Copy so the caller can do whatever with results
+ return copy.deepcopy(self._results)
+
+ def _tasks_from_predictions(self, predictions):
+ for pred in predictions:
+ if "segmentation" in pred:
+ return ("bbox", "segm")
+ return ("bbox",)
+
+ def _eval_predictions(self, predictions):
+ """
+ Evaluate predictions. Fill self._results with the metrics of the tasks.
+
+ Args:
+ predictions (list[dict]): list of outputs from the model
+ """
+ self._logger.info("Preparing results in the LVIS format ...")
+ lvis_results = list(itertools.chain(*[x["instances"] for x in predictions]))
+ tasks = self._tasks or self._tasks_from_predictions(lvis_results)
+
+ # LVIS evaluator can be used to evaluate results for COCO dataset categories.
+ # In this case `_metadata` variable will have a field with COCO-specific category mapping.
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
+ reverse_id_mapping = {
+ v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
+ }
+ for result in lvis_results:
+ result["category_id"] = reverse_id_mapping[result["category_id"]]
+ else:
+ # unmap the category ids for LVIS (from 0-indexed to 1-indexed)
+ for result in lvis_results:
+ result["category_id"] += 1
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "lvis_instances_results.json")
+ self._logger.info("Saving results to {}".format(file_path))
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(lvis_results))
+ f.flush()
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info("Evaluating predictions ...")
+ for task in sorted(tasks):
+ res = _evaluate_predictions_on_lvis(
+ self._lvis_api,
+ lvis_results,
+ task,
+ max_dets_per_image=self._max_dets_per_image,
+ class_names=self._metadata.get("thing_classes"),
+ )
+ self._results[task] = res
+
+ def _eval_box_proposals(self, predictions):
+ """
+ Evaluate the box proposals in predictions.
+ Fill self._results with the metrics for "box_proposals" task.
+ """
+ if self._output_dir:
+ # Saving generated box proposals to file.
+ # Predicted box_proposals are in XYXY_ABS mode.
+ bbox_mode = BoxMode.XYXY_ABS.value
+ ids, boxes, objectness_logits = [], [], []
+ for prediction in predictions:
+ ids.append(prediction["image_id"])
+ boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
+ objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
+
+ proposal_data = {
+ "boxes": boxes,
+ "objectness_logits": objectness_logits,
+ "ids": ids,
+ "bbox_mode": bbox_mode,
+ }
+ with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
+ pickle.dump(proposal_data, f)
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info("Evaluating bbox proposals ...")
+ res = {}
+ areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
+ for limit in [100, 1000]:
+ for area, suffix in areas.items():
+ stats = _evaluate_box_proposals(predictions, self._lvis_api, area=area, limit=limit)
+ key = "AR{}@{:d}".format(suffix, limit)
+ res[key] = float(stats["ar"].item() * 100)
+ self._logger.info("Proposal metrics: \n" + create_small_table(res))
+ self._results["box_proposals"] = res
+
+
+# inspired from Detectron:
+# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
+def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None):
+ """
+ Evaluate detection proposal recall metrics. This function is a much
+ faster alternative to the official LVIS API recall evaluation code. However,
+ it produces slightly different results.
+ """
+ # Record max overlap value for each gt box
+ # Return vector of overlap values
+ areas = {
+ "all": 0,
+ "small": 1,
+ "medium": 2,
+ "large": 3,
+ "96-128": 4,
+ "128-256": 5,
+ "256-512": 6,
+ "512-inf": 7,
+ }
+ area_ranges = [
+ [0 ** 2, 1e5 ** 2], # all
+ [0 ** 2, 32 ** 2], # small
+ [32 ** 2, 96 ** 2], # medium
+ [96 ** 2, 1e5 ** 2], # large
+ [96 ** 2, 128 ** 2], # 96-128
+ [128 ** 2, 256 ** 2], # 128-256
+ [256 ** 2, 512 ** 2], # 256-512
+ [512 ** 2, 1e5 ** 2],
+ ] # 512-inf
+ assert area in areas, "Unknown area range: {}".format(area)
+ area_range = area_ranges[areas[area]]
+ gt_overlaps = []
+ num_pos = 0
+
+ for prediction_dict in dataset_predictions:
+ predictions = prediction_dict["proposals"]
+
+ # sort predictions in descending order
+ # TODO maybe remove this and make it explicit in the documentation
+ inds = predictions.objectness_logits.sort(descending=True)[1]
+ predictions = predictions[inds]
+
+ ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]])
+ anno = lvis_api.load_anns(ann_ids)
+ gt_boxes = [
+ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno
+ ]
+ gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
+ gt_boxes = Boxes(gt_boxes)
+ gt_areas = torch.as_tensor([obj["area"] for obj in anno])
+
+ if len(gt_boxes) == 0 or len(predictions) == 0:
+ continue
+
+ valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
+ gt_boxes = gt_boxes[valid_gt_inds]
+
+ num_pos += len(gt_boxes)
+
+ if len(gt_boxes) == 0:
+ continue
+
+ if limit is not None and len(predictions) > limit:
+ predictions = predictions[:limit]
+
+ overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
+
+ _gt_overlaps = torch.zeros(len(gt_boxes))
+ for j in range(min(len(predictions), len(gt_boxes))):
+ # find which proposal box maximally covers each gt box
+ # and get the iou amount of coverage for each gt box
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+
+ # find which gt box is 'best' covered (i.e. 'best' = most iou)
+ gt_ovr, gt_ind = max_overlaps.max(dim=0)
+ assert gt_ovr >= 0
+ # find the proposal box that covers the best covered gt box
+ box_ind = argmax_overlaps[gt_ind]
+ # record the iou coverage of this gt box
+ _gt_overlaps[j] = overlaps[box_ind, gt_ind]
+ assert _gt_overlaps[j] == gt_ovr
+ # mark the proposal box and the gt box as used
+ overlaps[box_ind, :] = -1
+ overlaps[:, gt_ind] = -1
+
+ # append recorded iou coverage level
+ gt_overlaps.append(_gt_overlaps)
+ gt_overlaps = (
+ torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
+ )
+ gt_overlaps, _ = torch.sort(gt_overlaps)
+
+ if thresholds is None:
+ step = 0.05
+ thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
+ recalls = torch.zeros_like(thresholds)
+ # compute recall for each iou threshold
+ for i, t in enumerate(thresholds):
+ recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
+ # ar = 2 * np.trapz(recalls, thresholds)
+ ar = recalls.mean()
+ return {
+ "ar": ar,
+ "recalls": recalls,
+ "thresholds": thresholds,
+ "gt_overlaps": gt_overlaps,
+ "num_pos": num_pos,
+ }
+
+
+def _evaluate_predictions_on_lvis(
+ lvis_gt, lvis_results, iou_type, max_dets_per_image=None, class_names=None
+):
+ """
+ Args:
+ iou_type (str):
+ max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
+ This limit, by default of the LVIS dataset, is 300.
+ class_names (None or list[str]): if provided, will use it to predict
+ per-category AP.
+
+ Returns:
+ a dict of {metric name: score}
+ """
+ metrics = {
+ "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
+ "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
+ }[iou_type]
+
+ logger = logging.getLogger(__name__)
+
+ if len(lvis_results) == 0: # TODO: check if needed
+ logger.warn("No predictions from the model!")
+ return {metric: float("nan") for metric in metrics}
+
+ if iou_type == "segm":
+ lvis_results = copy.deepcopy(lvis_results)
+ # When evaluating mask AP, if the results contain bbox, LVIS API will
+ # use the box area as the area of the instance, instead of the mask area.
+ # This leads to a different definition of small/medium/large.
+ # We remove the bbox field to let mask AP use mask area.
+ for c in lvis_results:
+ c.pop("bbox", None)
+
+ if max_dets_per_image is None:
+ max_dets_per_image = 300 # Default for LVIS dataset
+
+ from lvis import LVISEval, LVISResults
+
+ logger.info(f"Evaluating with max detections per image = {max_dets_per_image}")
+ lvis_results = LVISResults(lvis_gt, lvis_results, max_dets=max_dets_per_image)
+ lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
+ lvis_eval.run()
+ lvis_eval.print_results()
+
+ # Pull the standard metrics from the LVIS results
+ results = lvis_eval.get_results()
+ results = {metric: float(results[metric] * 100) for metric in metrics}
+ logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results))
+ return results
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/panoptic_evaluation.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/panoptic_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fb3462b7f9abf6feaa499976bfed526ebd17e31
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/panoptic_evaluation.py
@@ -0,0 +1,199 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import io
+import itertools
+import json
+import logging
+import numpy as np
+import os
+import tempfile
+from collections import OrderedDict
+from typing import Optional
+from PIL import Image
+from tabulate import tabulate
+
+from detectron2.data import MetadataCatalog
+from detectron2.utils import comm
+from detectron2.utils.file_io import PathManager
+
+from .evaluator import DatasetEvaluator
+
+logger = logging.getLogger(__name__)
+
+
+class COCOPanopticEvaluator(DatasetEvaluator):
+ """
+ Evaluate Panoptic Quality metrics on COCO using PanopticAPI.
+ It saves panoptic segmentation prediction in `output_dir`
+
+ It contains a synchronize call and has to be called from all workers.
+ """
+
+ def __init__(self, dataset_name: str, output_dir: Optional[str] = None):
+ """
+ Args:
+ dataset_name: name of the dataset
+ output_dir: output directory to save results for evaluation.
+ """
+ self._metadata = MetadataCatalog.get(dataset_name)
+ self._thing_contiguous_id_to_dataset_id = {
+ v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
+ }
+ self._stuff_contiguous_id_to_dataset_id = {
+ v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items()
+ }
+
+ self._output_dir = output_dir
+ if self._output_dir is not None:
+ PathManager.mkdirs(self._output_dir)
+
+ def reset(self):
+ self._predictions = []
+
+ def _convert_category_id(self, segment_info):
+ isthing = segment_info.pop("isthing", None)
+ if isthing is None:
+ # the model produces panoptic category id directly. No more conversion needed
+ return segment_info
+ if isthing is True:
+ segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[
+ segment_info["category_id"]
+ ]
+ else:
+ segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[
+ segment_info["category_id"]
+ ]
+ return segment_info
+
+ def process(self, inputs, outputs):
+ from panopticapi.utils import id2rgb
+
+ for input, output in zip(inputs, outputs):
+ panoptic_img, segments_info = output["panoptic_seg"]
+ panoptic_img = panoptic_img.cpu().numpy()
+ if segments_info is None:
+ # If "segments_info" is None, we assume "panoptic_img" is a
+ # H*W int32 image storing the panoptic_id in the format of
+ # category_id * label_divisor + instance_id. We reserve -1 for
+ # VOID label, and add 1 to panoptic_img since the official
+ # evaluation script uses 0 for VOID label.
+ label_divisor = self._metadata.label_divisor
+ segments_info = []
+ for panoptic_label in np.unique(panoptic_img):
+ if panoptic_label == -1:
+ # VOID region.
+ continue
+ pred_class = panoptic_label // label_divisor
+ isthing = (
+ pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
+ )
+ segments_info.append(
+ {
+ "id": int(panoptic_label) + 1,
+ "category_id": int(pred_class),
+ "isthing": bool(isthing),
+ }
+ )
+ # Official evaluation script uses 0 for VOID label.
+ panoptic_img += 1
+
+ file_name = os.path.basename(input["file_name"])
+ file_name_png = os.path.splitext(file_name)[0] + ".png"
+ with io.BytesIO() as out:
+ Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
+ segments_info = [self._convert_category_id(x) for x in segments_info]
+ self._predictions.append(
+ {
+ "image_id": input["image_id"],
+ "file_name": file_name_png,
+ "png_string": out.getvalue(),
+ "segments_info": segments_info,
+ }
+ )
+
+ def evaluate(self):
+ comm.synchronize()
+
+ self._predictions = comm.gather(self._predictions)
+ self._predictions = list(itertools.chain(*self._predictions))
+ if not comm.is_main_process():
+ return
+
+ # PanopticApi requires local files
+ gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
+ gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)
+
+ with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
+ logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
+ for p in self._predictions:
+ with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
+ f.write(p.pop("png_string"))
+
+ with open(gt_json, "r") as f:
+ json_data = json.load(f)
+ json_data["annotations"] = self._predictions
+
+ output_dir = self._output_dir or pred_dir
+ predictions_json = os.path.join(output_dir, "predictions.json")
+ with PathManager.open(predictions_json, "w") as f:
+ f.write(json.dumps(json_data))
+
+ from panopticapi.evaluation import pq_compute
+
+ with contextlib.redirect_stdout(io.StringIO()):
+ pq_res = pq_compute(
+ gt_json,
+ PathManager.get_local_path(predictions_json),
+ gt_folder=gt_folder,
+ pred_folder=pred_dir,
+ )
+
+ res = {}
+ res["PQ"] = 100 * pq_res["All"]["pq"]
+ res["SQ"] = 100 * pq_res["All"]["sq"]
+ res["RQ"] = 100 * pq_res["All"]["rq"]
+ res["PQ_th"] = 100 * pq_res["Things"]["pq"]
+ res["SQ_th"] = 100 * pq_res["Things"]["sq"]
+ res["RQ_th"] = 100 * pq_res["Things"]["rq"]
+ res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
+ res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
+ res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
+
+ results = OrderedDict({"panoptic_seg": res})
+ _print_panoptic_results(pq_res)
+
+ return results
+
+
+def _print_panoptic_results(pq_res):
+ headers = ["", "PQ", "SQ", "RQ", "#categories"]
+ data = []
+ for name in ["All", "Things", "Stuff"]:
+ row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]]
+ data.append(row)
+ table = tabulate(
+ data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center"
+ )
+ logger.info("Panoptic Evaluation Results:\n" + table)
+
+
+if __name__ == "__main__":
+ from detectron2.utils.logger import setup_logger
+
+ logger = setup_logger()
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--gt-json")
+ parser.add_argument("--gt-dir")
+ parser.add_argument("--pred-json")
+ parser.add_argument("--pred-dir")
+ args = parser.parse_args()
+
+ from panopticapi.evaluation import pq_compute
+
+ with contextlib.redirect_stdout(io.StringIO()):
+ pq_res = pq_compute(
+ args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir
+ )
+ _print_panoptic_results(pq_res)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/pascal_voc_evaluation.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/pascal_voc_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d1abcde2f87bb5f103e73cb364aaabbecb6e619
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/pascal_voc_evaluation.py
@@ -0,0 +1,300 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import logging
+import numpy as np
+import os
+import tempfile
+import xml.etree.ElementTree as ET
+from collections import OrderedDict, defaultdict
+from functools import lru_cache
+import torch
+
+from detectron2.data import MetadataCatalog
+from detectron2.utils import comm
+from detectron2.utils.file_io import PathManager
+
+from .evaluator import DatasetEvaluator
+
+
+class PascalVOCDetectionEvaluator(DatasetEvaluator):
+ """
+ Evaluate Pascal VOC style AP for Pascal VOC dataset.
+ It contains a synchronization, therefore has to be called from all ranks.
+
+ Note that the concept of AP can be implemented in different ways and may not
+ produce identical results. This class mimics the implementation of the official
+ Pascal VOC Matlab API, and should produce similar but not identical results to the
+ official API.
+ """
+
+ def __init__(self, dataset_name):
+ """
+ Args:
+ dataset_name (str): name of the dataset, e.g., "voc_2007_test"
+ """
+ self._dataset_name = dataset_name
+ meta = MetadataCatalog.get(dataset_name)
+
+ # Too many tiny files, download all to local for speed.
+ annotation_dir_local = PathManager.get_local_path(
+ os.path.join(meta.dirname, "Annotations/")
+ )
+ self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
+ self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
+ self._class_names = meta.thing_classes
+ assert meta.year in [2007, 2012], meta.year
+ self._is_2007 = meta.year == 2007
+ self._cpu_device = torch.device("cpu")
+ self._logger = logging.getLogger(__name__)
+
+ def reset(self):
+ self._predictions = defaultdict(list) # class name -> list of prediction strings
+
+ def process(self, inputs, outputs):
+ for input, output in zip(inputs, outputs):
+ image_id = input["image_id"]
+ instances = output["instances"].to(self._cpu_device)
+ boxes = instances.pred_boxes.tensor.numpy()
+ scores = instances.scores.tolist()
+ classes = instances.pred_classes.tolist()
+ for box, score, cls in zip(boxes, scores, classes):
+ xmin, ymin, xmax, ymax = box
+ # The inverse of data loading logic in `datasets/pascal_voc.py`
+ xmin += 1
+ ymin += 1
+ self._predictions[cls].append(
+ f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
+ )
+
+ def evaluate(self):
+ """
+ Returns:
+ dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75".
+ """
+ all_predictions = comm.gather(self._predictions, dst=0)
+ if not comm.is_main_process():
+ return
+ predictions = defaultdict(list)
+ for predictions_per_rank in all_predictions:
+ for clsid, lines in predictions_per_rank.items():
+ predictions[clsid].extend(lines)
+ del all_predictions
+
+ self._logger.info(
+ "Evaluating {} using {} metric. "
+ "Note that results do not use the official Matlab API.".format(
+ self._dataset_name, 2007 if self._is_2007 else 2012
+ )
+ )
+
+ with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
+ res_file_template = os.path.join(dirname, "{}.txt")
+
+ aps = defaultdict(list) # iou -> ap per class
+ for cls_id, cls_name in enumerate(self._class_names):
+ lines = predictions.get(cls_id, [""])
+
+ with open(res_file_template.format(cls_name), "w") as f:
+ f.write("\n".join(lines))
+
+ for thresh in range(50, 100, 5):
+ rec, prec, ap = voc_eval(
+ res_file_template,
+ self._anno_file_template,
+ self._image_set_path,
+ cls_name,
+ ovthresh=thresh / 100.0,
+ use_07_metric=self._is_2007,
+ )
+ aps[thresh].append(ap * 100)
+
+ ret = OrderedDict()
+ mAP = {iou: np.mean(x) for iou, x in aps.items()}
+ ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
+ return ret
+
+
+##############################################################################
+#
+# Below code is modified from
+# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
+# --------------------------------------------------------
+# Fast/er R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Bharath Hariharan
+# --------------------------------------------------------
+
+"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
+
+
+@lru_cache(maxsize=None)
+def parse_rec(filename):
+ """Parse a PASCAL VOC xml file."""
+ with PathManager.open(filename) as f:
+ tree = ET.parse(f)
+ objects = []
+ for obj in tree.findall("object"):
+ obj_struct = {}
+ obj_struct["name"] = obj.find("name").text
+ obj_struct["pose"] = obj.find("pose").text
+ obj_struct["truncated"] = int(obj.find("truncated").text)
+ obj_struct["difficult"] = int(obj.find("difficult").text)
+ bbox = obj.find("bndbox")
+ obj_struct["bbox"] = [
+ int(bbox.find("xmin").text),
+ int(bbox.find("ymin").text),
+ int(bbox.find("xmax").text),
+ int(bbox.find("ymax").text),
+ ]
+ objects.append(obj_struct)
+
+ return objects
+
+
+def voc_ap(rec, prec, use_07_metric=False):
+ """Compute VOC AP given precision and recall. If use_07_metric is true, uses
+ the VOC 07 11-point method (default:False).
+ """
+ if use_07_metric:
+ # 11 point metric
+ ap = 0.0
+ for t in np.arange(0.0, 1.1, 0.1):
+ if np.sum(rec >= t) == 0:
+ p = 0
+ else:
+ p = np.max(prec[rec >= t])
+ ap = ap + p / 11.0
+ else:
+ # correct AP calculation
+ # first append sentinel values at the end
+ mrec = np.concatenate(([0.0], rec, [1.0]))
+ mpre = np.concatenate(([0.0], prec, [0.0]))
+
+ # compute the precision envelope
+ for i in range(mpre.size - 1, 0, -1):
+ mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+ # to calculate area under PR curve, look for points
+ # where X axis (recall) changes value
+ i = np.where(mrec[1:] != mrec[:-1])[0]
+
+ # and sum (\Delta recall) * prec
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+ return ap
+
+
+def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
+ """rec, prec, ap = voc_eval(detpath,
+ annopath,
+ imagesetfile,
+ classname,
+ [ovthresh],
+ [use_07_metric])
+
+ Top level function that does the PASCAL VOC evaluation.
+
+ detpath: Path to detections
+ detpath.format(classname) should produce the detection results file.
+ annopath: Path to annotations
+ annopath.format(imagename) should be the xml annotations file.
+ imagesetfile: Text file containing the list of images, one image per line.
+ classname: Category name (duh)
+ [ovthresh]: Overlap threshold (default = 0.5)
+ [use_07_metric]: Whether to use VOC07's 11 point AP computation
+ (default False)
+ """
+ # assumes detections are in detpath.format(classname)
+ # assumes annotations are in annopath.format(imagename)
+ # assumes imagesetfile is a text file with each line an image name
+
+ # first load gt
+ # read list of images
+ with PathManager.open(imagesetfile, "r") as f:
+ lines = f.readlines()
+ imagenames = [x.strip() for x in lines]
+
+ # load annots
+ recs = {}
+ for imagename in imagenames:
+ recs[imagename] = parse_rec(annopath.format(imagename))
+
+ # extract gt objects for this class
+ class_recs = {}
+ npos = 0
+ for imagename in imagenames:
+ R = [obj for obj in recs[imagename] if obj["name"] == classname]
+ bbox = np.array([x["bbox"] for x in R])
+ difficult = np.array([x["difficult"] for x in R]).astype(np.bool)
+ # difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT
+ det = [False] * len(R)
+ npos = npos + sum(~difficult)
+ class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
+
+ # read dets
+ detfile = detpath.format(classname)
+ with open(detfile, "r") as f:
+ lines = f.readlines()
+
+ splitlines = [x.strip().split(" ") for x in lines]
+ image_ids = [x[0] for x in splitlines]
+ confidence = np.array([float(x[1]) for x in splitlines])
+ BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
+
+ # sort by confidence
+ sorted_ind = np.argsort(-confidence)
+ BB = BB[sorted_ind, :]
+ image_ids = [image_ids[x] for x in sorted_ind]
+
+ # go down dets and mark TPs and FPs
+ nd = len(image_ids)
+ tp = np.zeros(nd)
+ fp = np.zeros(nd)
+ for d in range(nd):
+ R = class_recs[image_ids[d]]
+ bb = BB[d, :].astype(float)
+ ovmax = -np.inf
+ BBGT = R["bbox"].astype(float)
+
+ if BBGT.size > 0:
+ # compute overlaps
+ # intersection
+ ixmin = np.maximum(BBGT[:, 0], bb[0])
+ iymin = np.maximum(BBGT[:, 1], bb[1])
+ ixmax = np.minimum(BBGT[:, 2], bb[2])
+ iymax = np.minimum(BBGT[:, 3], bb[3])
+ iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
+ ih = np.maximum(iymax - iymin + 1.0, 0.0)
+ inters = iw * ih
+
+ # union
+ uni = (
+ (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
+ - inters
+ )
+
+ overlaps = inters / uni
+ ovmax = np.max(overlaps)
+ jmax = np.argmax(overlaps)
+
+ if ovmax > ovthresh:
+ if not R["difficult"][jmax]:
+ if not R["det"][jmax]:
+ tp[d] = 1.0
+ R["det"][jmax] = 1
+ else:
+ fp[d] = 1.0
+ else:
+ fp[d] = 1.0
+
+ # compute precision recall
+ fp = np.cumsum(fp)
+ tp = np.cumsum(tp)
+ rec = tp / float(npos)
+ # avoid divide by zero in case the first detection matches a difficult
+ # ground truth
+ prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
+ ap = voc_ap(rec, prec, use_07_metric)
+
+ return rec, prec, ap
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/rotated_coco_evaluation.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/rotated_coco_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea6d1b381dcf106339a03f08577df673ad439c46
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/rotated_coco_evaluation.py
@@ -0,0 +1,207 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import itertools
+import json
+import numpy as np
+import os
+import torch
+from pycocotools.cocoeval import COCOeval, maskUtils
+
+from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated
+from detectron2.utils.file_io import PathManager
+
+from .coco_evaluation import COCOEvaluator
+
+
+class RotatedCOCOeval(COCOeval):
+ @staticmethod
+ def is_rotated(box_list):
+ if type(box_list) == np.ndarray:
+ return box_list.shape[1] == 5
+ elif type(box_list) == list:
+ if box_list == []: # cannot decide the box_dim
+ return False
+ return np.all(
+ np.array(
+ [
+ (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray))
+ for obj in box_list
+ ]
+ )
+ )
+ return False
+
+ @staticmethod
+ def boxlist_to_tensor(boxlist, output_box_dim):
+ if type(boxlist) == np.ndarray:
+ box_tensor = torch.from_numpy(boxlist)
+ elif type(boxlist) == list:
+ if boxlist == []:
+ return torch.zeros((0, output_box_dim), dtype=torch.float32)
+ else:
+ box_tensor = torch.FloatTensor(boxlist)
+ else:
+ raise Exception("Unrecognized boxlist type")
+
+ input_box_dim = box_tensor.shape[1]
+ if input_box_dim != output_box_dim:
+ if input_box_dim == 4 and output_box_dim == 5:
+ box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
+ else:
+ raise Exception(
+ "Unable to convert from {}-dim box to {}-dim box".format(
+ input_box_dim, output_box_dim
+ )
+ )
+ return box_tensor
+
+ def compute_iou_dt_gt(self, dt, gt, is_crowd):
+ if self.is_rotated(dt) or self.is_rotated(gt):
+ # TODO: take is_crowd into consideration
+ assert all(c == 0 for c in is_crowd)
+ dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5))
+ gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5))
+ return pairwise_iou_rotated(dt, gt)
+ else:
+ # This is the same as the classical COCO evaluation
+ return maskUtils.iou(dt, gt, is_crowd)
+
+ def computeIoU(self, imgId, catId):
+ p = self.params
+ if p.useCats:
+ gt = self._gts[imgId, catId]
+ dt = self._dts[imgId, catId]
+ else:
+ gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
+ dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
+ if len(gt) == 0 and len(dt) == 0:
+ return []
+ inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
+ dt = [dt[i] for i in inds]
+ if len(dt) > p.maxDets[-1]:
+ dt = dt[0 : p.maxDets[-1]]
+
+ assert p.iouType == "bbox", "unsupported iouType for iou computation"
+
+ g = [g["bbox"] for g in gt]
+ d = [d["bbox"] for d in dt]
+
+ # compute iou between each dt and gt region
+ iscrowd = [int(o["iscrowd"]) for o in gt]
+
+ # Note: this function is copied from cocoeval.py in cocoapi
+ # and the major difference is here.
+ ious = self.compute_iou_dt_gt(d, g, iscrowd)
+ return ious
+
+
+class RotatedCOCOEvaluator(COCOEvaluator):
+ """
+ Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs,
+ with rotated boxes support.
+ Note: this uses IOU only and does not consider angle differences.
+ """
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
+ It is a list of dict. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name", "image_id".
+ outputs: the outputs of a COCO model. It is a list of dicts with key
+ "instances" that contains :class:`Instances`.
+ """
+ for input, output in zip(inputs, outputs):
+ prediction = {"image_id": input["image_id"]}
+
+ if "instances" in output:
+ instances = output["instances"].to(self._cpu_device)
+
+ prediction["instances"] = self.instances_to_json(instances, input["image_id"])
+ if "proposals" in output:
+ prediction["proposals"] = output["proposals"].to(self._cpu_device)
+ self._predictions.append(prediction)
+
+ def instances_to_json(self, instances, img_id):
+ num_instance = len(instances)
+ if num_instance == 0:
+ return []
+
+ boxes = instances.pred_boxes.tensor.numpy()
+ if boxes.shape[1] == 4:
+ boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ boxes = boxes.tolist()
+ scores = instances.scores.tolist()
+ classes = instances.pred_classes.tolist()
+
+ results = []
+ for k in range(num_instance):
+ result = {
+ "image_id": img_id,
+ "category_id": classes[k],
+ "bbox": boxes[k],
+ "score": scores[k],
+ }
+
+ results.append(result)
+ return results
+
+ def _eval_predictions(self, predictions, img_ids=None): # img_ids: unused
+ """
+ Evaluate predictions on the given tasks.
+ Fill self._results with the metrics of the tasks.
+ """
+ self._logger.info("Preparing results for COCO format ...")
+ coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
+
+ # unmap the category ids for COCO
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
+ reverse_id_mapping = {
+ v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
+ }
+ for result in coco_results:
+ result["category_id"] = reverse_id_mapping[result["category_id"]]
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "coco_instances_results.json")
+ self._logger.info("Saving results to {}".format(file_path))
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(coco_results))
+ f.flush()
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info("Evaluating predictions ...")
+
+ assert self._tasks is None or set(self._tasks) == {
+ "bbox"
+ }, "[RotatedCOCOEvaluator] Only bbox evaluation is supported"
+ coco_eval = (
+ self._evaluate_predictions_on_coco(self._coco_api, coco_results)
+ if len(coco_results) > 0
+ else None # cocoapi does not handle empty results very well
+ )
+
+ task = "bbox"
+ res = self._derive_coco_results(
+ coco_eval, task, class_names=self._metadata.get("thing_classes")
+ )
+ self._results[task] = res
+
+ def _evaluate_predictions_on_coco(self, coco_gt, coco_results):
+ """
+ Evaluate the coco results using COCOEval API.
+ """
+ assert len(coco_results) > 0
+
+ coco_dt = coco_gt.loadRes(coco_results)
+
+ # Only bbox is supported for now
+ coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox")
+
+ coco_eval.evaluate()
+ coco_eval.accumulate()
+ coco_eval.summarize()
+
+ return coco_eval
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/sem_seg_evaluation.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/sem_seg_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a19db71562ef47569dc7f77ec616af85447f0ec
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/sem_seg_evaluation.py
@@ -0,0 +1,184 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import itertools
+import json
+import logging
+import numpy as np
+import os
+from collections import OrderedDict
+import PIL.Image as Image
+import pycocotools.mask as mask_util
+import torch
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.utils.comm import all_gather, is_main_process, synchronize
+from detectron2.utils.file_io import PathManager
+
+from .evaluator import DatasetEvaluator
+
+
+class SemSegEvaluator(DatasetEvaluator):
+ """
+ Evaluate semantic segmentation metrics.
+ """
+
+ def __init__(
+ self,
+ dataset_name,
+ distributed=True,
+ output_dir=None,
+ *,
+ num_classes=None,
+ ignore_label=None,
+ ):
+ """
+ Args:
+ dataset_name (str): name of the dataset to be evaluated.
+ distributed (bool): if True, will collect results from all ranks for evaluation.
+ Otherwise, will evaluate the results in the current process.
+ output_dir (str): an output directory to dump results.
+ num_classes, ignore_label: deprecated argument
+ """
+ self._logger = logging.getLogger(__name__)
+ if num_classes is not None:
+ self._logger.warn(
+ "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
+ )
+ if ignore_label is not None:
+ self._logger.warn(
+ "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
+ )
+ self._dataset_name = dataset_name
+ self._distributed = distributed
+ self._output_dir = output_dir
+
+ self._cpu_device = torch.device("cpu")
+
+ self.input_file_to_gt_file = {
+ dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
+ for dataset_record in DatasetCatalog.get(dataset_name)
+ }
+
+ meta = MetadataCatalog.get(dataset_name)
+ # Dict that maps contiguous training ids to COCO category ids
+ try:
+ c2d = meta.stuff_dataset_id_to_contiguous_id
+ self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
+ except AttributeError:
+ self._contiguous_id_to_dataset_id = None
+ self._class_names = meta.stuff_classes
+ self._num_classes = len(meta.stuff_classes)
+ if num_classes is not None:
+ assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
+ self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label
+
+ def reset(self):
+ self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
+ self._predictions = []
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a model.
+ It is a list of dicts. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name".
+ outputs: the outputs of a model. It is either list of semantic segmentation predictions
+ (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
+ segmentation prediction in the same format.
+ """
+ for input, output in zip(inputs, outputs):
+ output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
+ pred = np.array(output, dtype=np.int)
+ with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f:
+ gt = np.array(Image.open(f), dtype=np.int)
+
+ gt[gt == self._ignore_label] = self._num_classes
+
+ self._conf_matrix += np.bincount(
+ (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
+ minlength=self._conf_matrix.size,
+ ).reshape(self._conf_matrix.shape)
+
+ self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
+
+ def evaluate(self):
+ """
+ Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
+
+ * Mean intersection-over-union averaged across classes (mIoU)
+ * Frequency Weighted IoU (fwIoU)
+ * Mean pixel accuracy averaged across classes (mACC)
+ * Pixel Accuracy (pACC)
+ """
+ if self._distributed:
+ synchronize()
+ conf_matrix_list = all_gather(self._conf_matrix)
+ self._predictions = all_gather(self._predictions)
+ self._predictions = list(itertools.chain(*self._predictions))
+ if not is_main_process():
+ return
+
+ self._conf_matrix = np.zeros_like(self._conf_matrix)
+ for conf_matrix in conf_matrix_list:
+ self._conf_matrix += conf_matrix
+
+ if self._output_dir:
+ PathManager.mkdirs(self._output_dir)
+ file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(self._predictions))
+
+ acc = np.full(self._num_classes, np.nan, dtype=np.float)
+ iou = np.full(self._num_classes, np.nan, dtype=np.float)
+ tp = self._conf_matrix.diagonal()[:-1].astype(np.float)
+ pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float)
+ class_weights = pos_gt / np.sum(pos_gt)
+ pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float)
+ acc_valid = pos_gt > 0
+ acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
+ iou_valid = (pos_gt + pos_pred) > 0
+ union = pos_gt + pos_pred - tp
+ iou[acc_valid] = tp[acc_valid] / union[acc_valid]
+ macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
+ miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
+ fiou = np.sum(iou[acc_valid] * class_weights[acc_valid])
+ pacc = np.sum(tp) / np.sum(pos_gt)
+
+ res = {}
+ res["mIoU"] = 100 * miou
+ res["fwIoU"] = 100 * fiou
+ for i, name in enumerate(self._class_names):
+ res["IoU-{}".format(name)] = 100 * iou[i]
+ res["mACC"] = 100 * macc
+ res["pACC"] = 100 * pacc
+ for i, name in enumerate(self._class_names):
+ res["ACC-{}".format(name)] = 100 * acc[i]
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth")
+ with PathManager.open(file_path, "wb") as f:
+ torch.save(res, f)
+ results = OrderedDict({"sem_seg": res})
+ self._logger.info(results)
+ return results
+
+ def encode_json_sem_seg(self, sem_seg, input_file_name):
+ """
+ Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.
+ See http://cocodataset.org/#format-results
+ """
+ json_list = []
+ for label in np.unique(sem_seg):
+ if self._contiguous_id_to_dataset_id is not None:
+ assert (
+ label in self._contiguous_id_to_dataset_id
+ ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name)
+ dataset_id = self._contiguous_id_to_dataset_id[label]
+ else:
+ dataset_id = int(label)
+ mask = (sem_seg == label).astype(np.uint8)
+ mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0]
+ mask_rle["counts"] = mask_rle["counts"].decode("utf-8")
+ json_list.append(
+ {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle}
+ )
+ return json_list
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/testing.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e5ae625bb0593fc20739dd3ea549157e4df4f3d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/testing.py
@@ -0,0 +1,85 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import numpy as np
+import pprint
+import sys
+from collections.abc import Mapping
+
+
+def print_csv_format(results):
+ """
+ Print main metrics in a format similar to Detectron,
+ so that they are easy to copypaste into a spreadsheet.
+
+ Args:
+ results (OrderedDict[dict]): task_name -> {metric -> score}
+ unordered dict can also be printed, but in arbitrary order
+ """
+ assert isinstance(results, Mapping) or not len(results), results
+ logger = logging.getLogger(__name__)
+ for task, res in results.items():
+ if isinstance(res, Mapping):
+ # Don't print "AP-category" metrics since they are usually not tracked.
+ important_res = [(k, v) for k, v in res.items() if "-" not in k]
+ logger.info("copypaste: Task: {}".format(task))
+ logger.info("copypaste: " + ",".join([k[0] for k in important_res]))
+ logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res]))
+ else:
+ logger.info(f"copypaste: {task}={res}")
+
+
+def verify_results(cfg, results):
+ """
+ Args:
+ results (OrderedDict[dict]): task_name -> {metric -> score}
+
+ Returns:
+ bool: whether the verification succeeds or not
+ """
+ expected_results = cfg.TEST.EXPECTED_RESULTS
+ if not len(expected_results):
+ return True
+
+ ok = True
+ for task, metric, expected, tolerance in expected_results:
+ actual = results[task].get(metric, None)
+ if actual is None:
+ ok = False
+ continue
+ if not np.isfinite(actual):
+ ok = False
+ continue
+ diff = abs(actual - expected)
+ if diff > tolerance:
+ ok = False
+
+ logger = logging.getLogger(__name__)
+ if not ok:
+ logger.error("Result verification failed!")
+ logger.error("Expected Results: " + str(expected_results))
+ logger.error("Actual Results: " + pprint.pformat(results))
+
+ sys.exit(1)
+ else:
+ logger.info("Results verification passed.")
+ return ok
+
+
+def flatten_results_dict(results):
+ """
+ Expand a hierarchical dict of scalars into a flat dict of scalars.
+ If results[k1][k2][k3] = v, the returned dict will have the entry
+ {"k1/k2/k3": v}.
+
+ Args:
+ results (dict):
+ """
+ r = {}
+ for k, v in results.items():
+ if isinstance(v, Mapping):
+ v = flatten_results_dict(v)
+ for kk, vv in v.items():
+ r[k + "/" + kk] = vv
+ else:
+ r[k] = v
+ return r
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/README.md b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fcd33513fb81ef3aeb4d3c8d9732324dffa2646
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/README.md
@@ -0,0 +1,13 @@
+
+This directory contains code to prepare a detectron2 model for deployment.
+Currently it supports exporting a detectron2 model to Caffe2 format through ONNX.
+
+Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage.
+
+
+### Acknowledgements
+
+Thanks to Mobile Vision team at Facebook for developing the Caffe2 conversion tools.
+
+Thanks to Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who
+help export Detectron2 models to TorchScript.
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..25e5c94618a71cc584756ca2e17d6233a072dd87
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+try:
+ from caffe2.proto import caffe2_pb2 as _tmp
+
+ # caffe2 is optional
+except ImportError:
+ pass
+else:
+ from .api import *
+
+from .flatten import TracingAdapter
+from .torchscript import scripting_with_instances, dump_torchscript_IR
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/api.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad4272183f2a533dbb68f6e65cf42144f4b69fc4
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/api.py
@@ -0,0 +1,235 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import os
+import torch
+from caffe2.proto import caffe2_pb2
+from torch import nn
+
+from detectron2.config import CfgNode
+from detectron2.utils.file_io import PathManager
+
+from .caffe2_inference import ProtobufDetectionModel
+from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
+from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph
+
+__all__ = [
+ "add_export_config",
+ "Caffe2Model",
+ "Caffe2Tracer",
+]
+
+
+def add_export_config(cfg):
+ return cfg
+
+
+class Caffe2Tracer:
+ """
+ Make a detectron2 model traceable with Caffe2 operators.
+ This class creates a traceable version of a detectron2 model which:
+
+ 1. Rewrite parts of the model using ops in Caffe2. Note that some ops do
+ not have GPU implementation in Caffe2.
+ 2. Remove post-processing and only produce raw layer outputs
+
+ After making a traceable model, the class provide methods to export such a
+ model to different deployment formats.
+ Exported graph produced by this class take two input tensors:
+
+ 1. (1, C, H, W) float "data" which is an image (usually in [0, 255]).
+ (H, W) often has to be padded to multiple of 32 (depend on the model
+ architecture).
+ 2. 1x3 float "im_info", each row of which is (height, width, 1.0).
+ Height and width are true image shapes before padding.
+
+ The class currently only supports models using builtin meta architectures.
+ Batch inference is not supported, and contributions are welcome.
+ """
+
+ def __init__(self, cfg: CfgNode, model: nn.Module, inputs):
+ """
+ Args:
+ cfg (CfgNode): a detectron2 config used to construct caffe2-compatible model.
+ model (nn.Module): An original pytorch model. Must be among a few official models
+ in detectron2 that can be converted to become caffe2-compatible automatically.
+ Weights have to be already loaded to this model.
+ inputs: sample inputs that the given model takes for inference.
+ Will be used to trace the model. For most models, random inputs with
+ no detected objects will not work as they lead to wrong traces.
+ """
+ assert isinstance(cfg, CfgNode), cfg
+ assert isinstance(model, torch.nn.Module), type(model)
+
+ # TODO make it support custom models, by passing in c2 model directly
+ C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
+ self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model))
+ self.inputs = inputs
+ self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs)
+
+ def export_caffe2(self):
+ """
+ Export the model to Caffe2's protobuf format.
+ The returned object can be saved with its :meth:`.save_protobuf()` method.
+ The result can be loaded and executed using Caffe2 runtime.
+
+ Returns:
+ :class:`Caffe2Model`
+ """
+ from .caffe2_export import export_caffe2_detection_model
+
+ predict_net, init_net = export_caffe2_detection_model(
+ self.traceable_model, self.traceable_inputs
+ )
+ return Caffe2Model(predict_net, init_net)
+
+ def export_onnx(self):
+ """
+ Export the model to ONNX format.
+ Note that the exported model contains custom ops only available in caffe2, therefore it
+ cannot be directly executed by other runtime (such as onnxruntime or TensorRT).
+ Post-processing or transformation passes may be applied on the model to accommodate
+ different runtimes, but we currently do not provide support for them.
+
+ Returns:
+ onnx.ModelProto: an onnx model.
+ """
+ from .caffe2_export import export_onnx_model as export_onnx_model_impl
+
+ return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,))
+
+ def export_torchscript(self):
+ """
+ Export the model to a ``torch.jit.TracedModule`` by tracing.
+ The returned object can be saved to a file by ``.save()``.
+
+ Returns:
+ torch.jit.TracedModule: a torch TracedModule
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Tracing the model with torch.jit.trace ...")
+ with torch.no_grad():
+ return torch.jit.trace(self.traceable_model, (self.traceable_inputs,))
+
+
+class Caffe2Model(nn.Module):
+ """
+ A wrapper around the traced model in Caffe2's protobuf format.
+ The exported graph has different inputs/outputs from the original Pytorch
+ model, as explained in :class:`Caffe2Tracer`. This class wraps around the
+ exported graph to simulate the same interface as the original Pytorch model.
+ It also provides functions to save/load models in Caffe2's format.'
+
+ Examples:
+ ::
+ c2_model = Caffe2Tracer(cfg, torch_model, inputs).export_caffe2()
+ inputs = [{"image": img_tensor_CHW}]
+ outputs = c2_model(inputs)
+ orig_outputs = torch_model(inputs)
+ """
+
+ def __init__(self, predict_net, init_net):
+ super().__init__()
+ self.eval() # always in eval mode
+ self._predict_net = predict_net
+ self._init_net = init_net
+ self._predictor = None
+
+ __init__.__HIDE_SPHINX_DOC__ = True
+
+ @property
+ def predict_net(self):
+ """
+ caffe2.core.Net: the underlying caffe2 predict net
+ """
+ return self._predict_net
+
+ @property
+ def init_net(self):
+ """
+ caffe2.core.Net: the underlying caffe2 init net
+ """
+ return self._init_net
+
+ def save_protobuf(self, output_dir):
+ """
+ Save the model as caffe2's protobuf format.
+ It saves the following files:
+
+ * "model.pb": definition of the graph. Can be visualized with
+ tools like `netron `_.
+ * "model_init.pb": model parameters
+ * "model.pbtxt": human-readable definition of the graph. Not
+ needed for deployment.
+
+ Args:
+ output_dir (str): the output directory to save protobuf files.
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Saving model to {} ...".format(output_dir))
+ if not PathManager.exists(output_dir):
+ PathManager.mkdirs(output_dir)
+
+ with PathManager.open(os.path.join(output_dir, "model.pb"), "wb") as f:
+ f.write(self._predict_net.SerializeToString())
+ with PathManager.open(os.path.join(output_dir, "model.pbtxt"), "w") as f:
+ f.write(str(self._predict_net))
+ with PathManager.open(os.path.join(output_dir, "model_init.pb"), "wb") as f:
+ f.write(self._init_net.SerializeToString())
+
+ def save_graph(self, output_file, inputs=None):
+ """
+ Save the graph as SVG format.
+
+ Args:
+ output_file (str): a SVG file
+ inputs: optional inputs given to the model.
+ If given, the inputs will be used to run the graph to record
+ shape of every tensor. The shape information will be
+ saved together with the graph.
+ """
+ from .caffe2_export import run_and_save_graph
+
+ if inputs is None:
+ save_graph(self._predict_net, output_file, op_only=False)
+ else:
+ size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0)
+ device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii")
+ inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device)
+ inputs = [x.cpu().numpy() for x in inputs]
+ run_and_save_graph(self._predict_net, self._init_net, inputs, output_file)
+
+ @staticmethod
+ def load_protobuf(dir):
+ """
+ Args:
+ dir (str): a directory used to save Caffe2Model with
+ :meth:`save_protobuf`.
+ The files "model.pb" and "model_init.pb" are needed.
+
+ Returns:
+ Caffe2Model: the caffe2 model loaded from this directory.
+ """
+ predict_net = caffe2_pb2.NetDef()
+ with PathManager.open(os.path.join(dir, "model.pb"), "rb") as f:
+ predict_net.ParseFromString(f.read())
+
+ init_net = caffe2_pb2.NetDef()
+ with PathManager.open(os.path.join(dir, "model_init.pb"), "rb") as f:
+ init_net.ParseFromString(f.read())
+
+ return Caffe2Model(predict_net, init_net)
+
+ def __call__(self, inputs):
+ """
+ An interface that wraps around a Caffe2 model and mimics detectron2's models'
+ input/output format. See details about the format at :doc:`/tutorials/models`.
+ This is used to compare the outputs of caffe2 model with its original torch model.
+
+ Due to the extra conversion between Pytorch/Caffe2, this method is not meant for
+ benchmark. Because of the conversion, this method also has dependency
+ on detectron2 in order to convert to detectron2's output format.
+ """
+ if self._predictor is None:
+ self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net)
+ return self._predictor(inputs)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/c10.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/c10.py
new file mode 100644
index 0000000000000000000000000000000000000000..25ee23009547913733dc528fb8a39ca995fd9e31
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/c10.py
@@ -0,0 +1,534 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import math
+import torch
+import torch.nn.functional as F
+
+from detectron2.layers import cat
+from detectron2.layers.roi_align_rotated import ROIAlignRotated
+from detectron2.modeling import poolers
+from detectron2.modeling.proposal_generator import rpn
+from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference
+from detectron2.structures import Boxes, ImageList, Instances, Keypoints
+
+from .shared import alias, to_device
+
+
+"""
+This file contains caffe2-compatible implementation of several detectron2 components.
+"""
+
+
+class Caffe2Boxes(Boxes):
+ """
+ Representing a list of detectron2.structures.Boxes from minibatch, each box
+ is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector
+ (batch index + 5 coordinates) for RotatedBoxes.
+ """
+
+ def __init__(self, tensor):
+ assert isinstance(tensor, torch.Tensor)
+ assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size()
+ # TODO: make tensor immutable when dim is Nx5 for Boxes,
+ # and Nx6 for RotatedBoxes?
+ self.tensor = tensor
+
+
+# TODO clean up this class, maybe just extend Instances
+class InstancesList(object):
+ """
+ Tensor representation of a list of Instances object for a batch of images.
+
+ When dealing with a batch of images with Caffe2 ops, a list of bboxes
+ (instances) are usually represented by single Tensor with size
+ (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is
+ for providing common functions to convert between these two representations.
+ """
+
+ def __init__(self, im_info, indices, extra_fields=None):
+ # [N, 3] -> (H, W, Scale)
+ self.im_info = im_info
+ # [N,] -> indice of batch to which the instance belongs
+ self.indices = indices
+ # [N, ...]
+ self.batch_extra_fields = extra_fields or {}
+
+ self.image_size = self.im_info
+
+ def get_fields(self):
+ """like `get_fields` in the Instances object,
+ but return each field in tensor representations"""
+ ret = {}
+ for k, v in self.batch_extra_fields.items():
+ # if isinstance(v, torch.Tensor):
+ # tensor_rep = v
+ # elif isinstance(v, (Boxes, Keypoints)):
+ # tensor_rep = v.tensor
+ # else:
+ # raise ValueError("Can't find tensor representation for: {}".format())
+ ret[k] = v
+ return ret
+
+ def has(self, name):
+ return name in self.batch_extra_fields
+
+ def set(self, name, value):
+ data_len = len(value)
+ if len(self.batch_extra_fields):
+ assert (
+ len(self) == data_len
+ ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
+ self.batch_extra_fields[name] = value
+
+ def __setattr__(self, name, val):
+ if name in ["im_info", "indices", "batch_extra_fields", "image_size"]:
+ super().__setattr__(name, val)
+ else:
+ self.set(name, val)
+
+ def __getattr__(self, name):
+ if name not in self.batch_extra_fields:
+ raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
+ return self.batch_extra_fields[name]
+
+ def __len__(self):
+ return len(self.indices)
+
+ def flatten(self):
+ ret = []
+ for _, v in self.batch_extra_fields.items():
+ if isinstance(v, (Boxes, Keypoints)):
+ ret.append(v.tensor)
+ else:
+ ret.append(v)
+ return ret
+
+ @staticmethod
+ def to_d2_instances_list(instances_list):
+ """
+ Convert InstancesList to List[Instances]. The input `instances_list` can
+ also be a List[Instances], in this case this method is a non-op.
+ """
+ if not isinstance(instances_list, InstancesList):
+ assert all(isinstance(x, Instances) for x in instances_list)
+ return instances_list
+
+ ret = []
+ for i, info in enumerate(instances_list.im_info):
+ instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())]))
+
+ ids = instances_list.indices == i
+ for k, v in instances_list.batch_extra_fields.items():
+ if isinstance(v, torch.Tensor):
+ instances.set(k, v[ids])
+ continue
+ elif isinstance(v, Boxes):
+ instances.set(k, v[ids, -4:])
+ continue
+
+ target_type, tensor_source = v
+ assert isinstance(tensor_source, torch.Tensor)
+ assert tensor_source.shape[0] == instances_list.indices.shape[0]
+ tensor_source = tensor_source[ids]
+
+ if issubclass(target_type, Boxes):
+ instances.set(k, Boxes(tensor_source[:, -4:]))
+ elif issubclass(target_type, Keypoints):
+ instances.set(k, Keypoints(tensor_source))
+ elif issubclass(target_type, torch.Tensor):
+ instances.set(k, tensor_source)
+ else:
+ raise ValueError("Can't handle targe type: {}".format(target_type))
+
+ ret.append(instances)
+ return ret
+
+
+class Caffe2Compatible(object):
+ """
+ A model can inherit this class to indicate that it can be traced and deployed with caffe2.
+ """
+
+ def _get_tensor_mode(self):
+ return self._tensor_mode
+
+ def _set_tensor_mode(self, v):
+ self._tensor_mode = v
+
+ tensor_mode = property(_get_tensor_mode, _set_tensor_mode)
+ """
+ If true, the model expects C2-style tensor only inputs/outputs format.
+ """
+
+
+class Caffe2RPN(Caffe2Compatible, rpn.RPN):
+ def _generate_proposals(
+ self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None
+ ):
+ assert isinstance(images, ImageList)
+ if self.tensor_mode:
+ im_info = images.image_sizes
+ else:
+ im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to(
+ images.tensor.device
+ )
+ assert isinstance(im_info, torch.Tensor)
+
+ rpn_rois_list = []
+ rpn_roi_probs_list = []
+ for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip(
+ objectness_logits_pred,
+ anchor_deltas_pred,
+ iter(self.anchor_generator.cell_anchors),
+ self.anchor_generator.strides,
+ ):
+ scores = scores.detach()
+ bbox_deltas = bbox_deltas.detach()
+
+ rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals(
+ scores,
+ bbox_deltas,
+ im_info,
+ cell_anchors_tensor,
+ spatial_scale=1.0 / feat_stride,
+ pre_nms_topN=self.pre_nms_topk[self.training],
+ post_nms_topN=self.post_nms_topk[self.training],
+ nms_thresh=self.nms_thresh,
+ min_size=self.min_box_size,
+ # correct_transform_coords=True, # deprecated argument
+ angle_bound_on=True, # Default
+ angle_bound_lo=-180,
+ angle_bound_hi=180,
+ clip_angle_thresh=1.0, # Default
+ legacy_plus_one=False,
+ )
+ rpn_rois_list.append(rpn_rois)
+ rpn_roi_probs_list.append(rpn_roi_probs)
+
+ # For FPN in D2, in RPN all proposals from different levels are concated
+ # together, ranked and picked by top post_nms_topk. Then in ROIPooler
+ # it calculates level_assignments and calls the RoIAlign from
+ # the corresponding level.
+
+ if len(objectness_logits_pred) == 1:
+ rpn_rois = rpn_rois_list[0]
+ rpn_roi_probs = rpn_roi_probs_list[0]
+ else:
+ assert len(rpn_rois_list) == len(rpn_roi_probs_list)
+ rpn_post_nms_topN = self.post_nms_topk[self.training]
+
+ device = rpn_rois_list[0].device
+ input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)]
+
+ # TODO remove this after confirming rpn_max_level/rpn_min_level
+ # is not needed in CollectRpnProposals.
+ feature_strides = list(self.anchor_generator.strides)
+ rpn_min_level = int(math.log2(feature_strides[0]))
+ rpn_max_level = int(math.log2(feature_strides[-1]))
+ assert (rpn_max_level - rpn_min_level + 1) == len(
+ rpn_rois_list
+ ), "CollectRpnProposals requires continuous levels"
+
+ rpn_rois = torch.ops._caffe2.CollectRpnProposals(
+ input_list,
+ # NOTE: in current implementation, rpn_max_level and rpn_min_level
+ # are not needed, only the subtraction of two matters and it
+ # can be infer from the number of inputs. Keep them now for
+ # consistency.
+ rpn_max_level=2 + len(rpn_rois_list) - 1,
+ rpn_min_level=2,
+ rpn_post_nms_topN=rpn_post_nms_topN,
+ )
+ rpn_rois = to_device(rpn_rois, device)
+ rpn_roi_probs = []
+
+ proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode)
+ return proposals, {}
+
+ def forward(self, images, features, gt_instances=None):
+ assert not self.training
+ features = [features[f] for f in self.in_features]
+ objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features)
+ return self._generate_proposals(
+ images,
+ objectness_logits_pred,
+ anchor_deltas_pred,
+ gt_instances,
+ )
+
+ @staticmethod
+ def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode):
+ proposals = InstancesList(
+ im_info=im_info,
+ indices=rpn_rois[:, 0],
+ extra_fields={
+ "proposal_boxes": Caffe2Boxes(rpn_rois),
+ "objectness_logits": (torch.Tensor, rpn_roi_probs),
+ },
+ )
+ if not tensor_mode:
+ proposals = InstancesList.to_d2_instances_list(proposals)
+ else:
+ proposals = [proposals]
+ return proposals
+
+
+class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler):
+ @staticmethod
+ def c2_preprocess(box_lists):
+ assert all(isinstance(x, Boxes) for x in box_lists)
+ if all(isinstance(x, Caffe2Boxes) for x in box_lists):
+ # input is pure-tensor based
+ assert len(box_lists) == 1
+ pooler_fmt_boxes = box_lists[0].tensor
+ else:
+ pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists)
+ return pooler_fmt_boxes
+
+ def forward(self, x, box_lists):
+ assert not self.training
+
+ pooler_fmt_boxes = self.c2_preprocess(box_lists)
+ num_level_assignments = len(self.level_poolers)
+
+ if num_level_assignments == 1:
+ if isinstance(self.level_poolers[0], ROIAlignRotated):
+ c2_roi_align = torch.ops._caffe2.RoIAlignRotated
+ aligned = True
+ else:
+ c2_roi_align = torch.ops._caffe2.RoIAlign
+ aligned = self.level_poolers[0].aligned
+
+ x0 = x[0]
+ if x0.is_quantized:
+ x0 = x0.dequantize()
+
+ out = c2_roi_align(
+ x0,
+ pooler_fmt_boxes,
+ order="NCHW",
+ spatial_scale=float(self.level_poolers[0].spatial_scale),
+ pooled_h=int(self.output_size[0]),
+ pooled_w=int(self.output_size[1]),
+ sampling_ratio=int(self.level_poolers[0].sampling_ratio),
+ aligned=aligned,
+ )
+ return out
+
+ device = pooler_fmt_boxes.device
+ assert (
+ self.max_level - self.min_level + 1 == 4
+ ), "Currently DistributeFpnProposals only support 4 levels"
+ fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(
+ to_device(pooler_fmt_boxes, "cpu"),
+ roi_canonical_scale=self.canonical_box_size,
+ roi_canonical_level=self.canonical_level,
+ roi_max_level=self.max_level,
+ roi_min_level=self.min_level,
+ legacy_plus_one=False,
+ )
+ fpn_outputs = [to_device(x, device) for x in fpn_outputs]
+
+ rois_fpn_list = fpn_outputs[:-1]
+ rois_idx_restore_int32 = fpn_outputs[-1]
+
+ roi_feat_fpn_list = []
+ for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers):
+ if isinstance(pooler, ROIAlignRotated):
+ c2_roi_align = torch.ops._caffe2.RoIAlignRotated
+ aligned = True
+ else:
+ c2_roi_align = torch.ops._caffe2.RoIAlign
+ aligned = bool(pooler.aligned)
+
+ if x_level.is_quantized:
+ x_level = x_level.dequantize()
+
+ roi_feat_fpn = c2_roi_align(
+ x_level,
+ roi_fpn,
+ order="NCHW",
+ spatial_scale=float(pooler.spatial_scale),
+ pooled_h=int(self.output_size[0]),
+ pooled_w=int(self.output_size[1]),
+ sampling_ratio=int(pooler.sampling_ratio),
+ aligned=aligned,
+ )
+ roi_feat_fpn_list.append(roi_feat_fpn)
+
+ roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0)
+ assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, (
+ "Caffe2 export requires tracing with a model checkpoint + input that can produce valid"
+ " detections. But no detections were obtained with the given checkpoint and input!"
+ )
+ roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32)
+ return roi_feat
+
+
+class Caffe2FastRCNNOutputsInference:
+ def __init__(self, tensor_mode):
+ self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode
+
+ def __call__(self, box_predictor, predictions, proposals):
+ """equivalent to FastRCNNOutputLayers.inference"""
+ num_classes = box_predictor.num_classes
+ score_thresh = box_predictor.test_score_thresh
+ nms_thresh = box_predictor.test_nms_thresh
+ topk_per_image = box_predictor.test_topk_per_image
+ is_rotated = len(box_predictor.box2box_transform.weights) == 5
+
+ if is_rotated:
+ box_dim = 5
+ assert box_predictor.box2box_transform.weights[4] == 1, (
+ "The weights for Rotated BBoxTransform in C2 have only 4 dimensions,"
+ + " thus enforcing the angle weight to be 1 for now"
+ )
+ box2box_transform_weights = box_predictor.box2box_transform.weights[:4]
+ else:
+ box_dim = 4
+ box2box_transform_weights = box_predictor.box2box_transform.weights
+
+ class_logits, box_regression = predictions
+ if num_classes + 1 == class_logits.shape[1]:
+ class_prob = F.softmax(class_logits, -1)
+ else:
+ assert num_classes == class_logits.shape[1]
+ class_prob = F.sigmoid(class_logits)
+ # BoxWithNMSLimit will infer num_classes from the shape of the class_prob
+ # So append a zero column as placeholder for the background class
+ class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1)
+
+ assert box_regression.shape[1] % box_dim == 0
+ cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1
+
+ input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1
+
+ rois = type(proposals[0].proposal_boxes).cat([p.proposal_boxes for p in proposals])
+ device, dtype = rois.tensor.device, rois.tensor.dtype
+ if input_tensor_mode:
+ im_info = proposals[0].image_size
+ rois = rois.tensor
+ else:
+ im_info = torch.tensor(
+ [[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]]
+ )
+ batch_ids = cat(
+ [
+ torch.full((b, 1), i, dtype=dtype, device=device)
+ for i, b in enumerate(len(p) for p in proposals)
+ ],
+ dim=0,
+ )
+ rois = torch.cat([batch_ids, rois.tensor], dim=1)
+
+ roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform(
+ to_device(rois, "cpu"),
+ to_device(box_regression, "cpu"),
+ to_device(im_info, "cpu"),
+ weights=box2box_transform_weights,
+ apply_scale=True,
+ rotated=is_rotated,
+ angle_bound_on=True,
+ angle_bound_lo=-180,
+ angle_bound_hi=180,
+ clip_angle_thresh=1.0,
+ legacy_plus_one=False,
+ )
+ roi_pred_bbox = to_device(roi_pred_bbox, device)
+ roi_batch_splits = to_device(roi_batch_splits, device)
+
+ nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(
+ to_device(class_prob, "cpu"),
+ to_device(roi_pred_bbox, "cpu"),
+ to_device(roi_batch_splits, "cpu"),
+ score_thresh=float(score_thresh),
+ nms=float(nms_thresh),
+ detections_per_im=int(topk_per_image),
+ soft_nms_enabled=False,
+ soft_nms_method="linear",
+ soft_nms_sigma=0.5,
+ soft_nms_min_score_thres=0.001,
+ rotated=is_rotated,
+ cls_agnostic_bbox_reg=cls_agnostic_bbox_reg,
+ input_boxes_include_bg_cls=False,
+ output_classes_include_bg_cls=False,
+ legacy_plus_one=False,
+ )
+ roi_score_nms = to_device(nms_outputs[0], device)
+ roi_bbox_nms = to_device(nms_outputs[1], device)
+ roi_class_nms = to_device(nms_outputs[2], device)
+ roi_batch_splits_nms = to_device(nms_outputs[3], device)
+ roi_keeps_nms = to_device(nms_outputs[4], device)
+ roi_keeps_size_nms = to_device(nms_outputs[5], device)
+ if not self.tensor_mode:
+ roi_class_nms = roi_class_nms.to(torch.int64)
+
+ roi_batch_ids = cat(
+ [
+ torch.full((b, 1), i, dtype=dtype, device=device)
+ for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms)
+ ],
+ dim=0,
+ )
+
+ roi_class_nms = alias(roi_class_nms, "class_nms")
+ roi_score_nms = alias(roi_score_nms, "score_nms")
+ roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms")
+ roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms")
+ roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms")
+ roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms")
+
+ results = InstancesList(
+ im_info=im_info,
+ indices=roi_batch_ids[:, 0],
+ extra_fields={
+ "pred_boxes": Caffe2Boxes(roi_bbox_nms),
+ "scores": roi_score_nms,
+ "pred_classes": roi_class_nms,
+ },
+ )
+
+ if not self.tensor_mode:
+ results = InstancesList.to_d2_instances_list(results)
+ batch_splits = roi_batch_splits_nms.int().tolist()
+ kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits))
+ else:
+ results = [results]
+ kept_indices = [roi_keeps_nms]
+
+ return results, kept_indices
+
+
+class Caffe2MaskRCNNInference:
+ def __call__(self, pred_mask_logits, pred_instances):
+ """equivalent to mask_head.mask_rcnn_inference"""
+ if all(isinstance(x, InstancesList) for x in pred_instances):
+ assert len(pred_instances) == 1
+ mask_probs_pred = pred_mask_logits.sigmoid()
+ mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs")
+ pred_instances[0].pred_masks = mask_probs_pred
+ else:
+ mask_rcnn_inference(pred_mask_logits, pred_instances)
+
+
+class Caffe2KeypointRCNNInference:
+ def __init__(self, use_heatmap_max_keypoint):
+ self.use_heatmap_max_keypoint = use_heatmap_max_keypoint
+
+ def __call__(self, pred_keypoint_logits, pred_instances):
+ # just return the keypoint heatmap for now,
+ # there will be option to call HeatmapMaxKeypointOp
+ output = alias(pred_keypoint_logits, "kps_score")
+ if all(isinstance(x, InstancesList) for x in pred_instances):
+ assert len(pred_instances) == 1
+ if self.use_heatmap_max_keypoint:
+ device = output.device
+ output = torch.ops._caffe2.HeatmapMaxKeypoint(
+ to_device(output, "cpu"),
+ pred_instances[0].pred_boxes.tensor,
+ should_output_softmax=True, # worth make it configerable?
+ )
+ output = to_device(output, device)
+ output = alias(output, "keypoints_out")
+ pred_instances[0].pred_keypoints = output
+ return pred_keypoint_logits
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_export.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..74ac123a7aed6cd77d6d833446a831d9048745b2
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_export.py
@@ -0,0 +1,207 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import copy
+import io
+import logging
+import numpy as np
+from typing import List
+import onnx
+import torch
+from caffe2.proto import caffe2_pb2
+from caffe2.python import core
+from caffe2.python.onnx.backend import Caffe2Backend
+from tabulate import tabulate
+from termcolor import colored
+from torch.onnx import OperatorExportTypes
+
+from .shared import (
+ ScopedWS,
+ construct_init_net_from_params,
+ fuse_alias_placeholder,
+ fuse_copy_between_cpu_and_gpu,
+ get_params_from_init_net,
+ group_norm_replace_aten_with_caffe2,
+ infer_device_type,
+ remove_dead_end_ops,
+ remove_reshape_for_fc,
+ save_graph,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def export_onnx_model(model, inputs):
+ """
+ Trace and export a model to onnx format.
+
+ Args:
+ model (nn.Module):
+ inputs (tuple[args]): the model will be called by `model(*inputs)`
+
+ Returns:
+ an onnx model
+ """
+ assert isinstance(model, torch.nn.Module)
+
+ # make sure all modules are in eval mode, onnx may change the training state
+ # of the module if the states are not consistent
+ def _check_eval(module):
+ assert not module.training
+
+ model.apply(_check_eval)
+
+ # Export the model to ONNX
+ with torch.no_grad():
+ with io.BytesIO() as f:
+ torch.onnx.export(
+ model,
+ inputs,
+ f,
+ operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
+ # verbose=True, # NOTE: uncomment this for debugging
+ # export_params=True,
+ )
+ onnx_model = onnx.load_from_string(f.getvalue())
+
+ # Apply ONNX's Optimization
+ all_passes = onnx.optimizer.get_available_passes()
+ passes = ["fuse_bn_into_conv"]
+ assert all(p in all_passes for p in passes)
+ onnx_model = onnx.optimizer.optimize(onnx_model, passes)
+ return onnx_model
+
+
+def _op_stats(net_def):
+ type_count = {}
+ for t in [op.type for op in net_def.op]:
+ type_count[t] = type_count.get(t, 0) + 1
+ type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet
+ type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count
+ return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list)
+
+
+def _assign_device_option(
+ predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor]
+):
+ """
+ ONNX exported network doesn't have concept of device, assign necessary
+ device option for each op in order to make it runable on GPU runtime.
+ """
+
+ def _get_device_type(torch_tensor):
+ assert torch_tensor.device.type in ["cpu", "cuda"]
+ assert torch_tensor.device.index == 0
+ return torch_tensor.device.type
+
+ def _assign_op_device_option(net_proto, net_ssa, blob_device_types):
+ for op, ssa_i in zip(net_proto.op, net_ssa):
+ if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]:
+ op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
+ else:
+ devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]]
+ assert all(d == devices[0] for d in devices)
+ if devices[0] == "cuda":
+ op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
+
+ # update ops in predict_net
+ predict_net_input_device_types = {
+ (name, 0): _get_device_type(tensor)
+ for name, tensor in zip(predict_net.external_input, tensor_inputs)
+ }
+ predict_net_device_types = infer_device_type(
+ predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch"
+ )
+ predict_net_ssa, _ = core.get_ssa(predict_net)
+ _assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types)
+
+ # update ops in init_net
+ init_net_ssa, versions = core.get_ssa(init_net)
+ init_net_output_device_types = {
+ (name, versions[name]): predict_net_device_types[(name, 0)]
+ for name in init_net.external_output
+ }
+ init_net_device_types = infer_device_type(
+ init_net, known_status=init_net_output_device_types, device_name_style="pytorch"
+ )
+ _assign_op_device_option(init_net, init_net_ssa, init_net_device_types)
+
+
+def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
+ """
+ Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
+
+ Arg:
+ model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
+ tensor_inputs: a list of tensors that caffe2 model takes as input.
+ """
+ model = copy.deepcopy(model)
+ assert isinstance(model, torch.nn.Module)
+ assert hasattr(model, "encode_additional_info")
+
+ # Export via ONNX
+ logger.info(
+ "Exporting a {} model via ONNX ...".format(type(model).__name__)
+ + " Some warnings from ONNX are expected and are usually not to worry about."
+ )
+ onnx_model = export_onnx_model(model, (tensor_inputs,))
+ # Convert ONNX model to Caffe2 protobuf
+ init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
+ ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
+ table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
+ logger.info(
+ "ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
+ )
+
+ # Apply protobuf optimization
+ fuse_alias_placeholder(predict_net, init_net)
+ if any(t.device.type != "cpu" for t in tensor_inputs):
+ fuse_copy_between_cpu_and_gpu(predict_net)
+ remove_dead_end_ops(init_net)
+ _assign_device_option(predict_net, init_net, tensor_inputs)
+ params, device_options = get_params_from_init_net(init_net)
+ predict_net, params = remove_reshape_for_fc(predict_net, params)
+ init_net = construct_init_net_from_params(params, device_options)
+ group_norm_replace_aten_with_caffe2(predict_net)
+
+ # Record necessary information for running the pb model in Detectron2 system.
+ model.encode_additional_info(predict_net, init_net)
+
+ logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
+ logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
+
+ return predict_net, init_net
+
+
+def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path):
+ """
+ Run the caffe2 model on given inputs, recording the shape and draw the graph.
+
+ predict_net/init_net: caffe2 model.
+ tensor_inputs: a list of tensors that caffe2 model takes as input.
+ graph_save_path: path for saving graph of exported model.
+ """
+
+ logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path))
+ save_graph(predict_net, graph_save_path, op_only=False)
+
+ # Run the exported Caffe2 net
+ logger.info("Running ONNX exported model ...")
+ with ScopedWS("__ws_tmp__", True) as ws:
+ ws.RunNetOnce(init_net)
+ initialized_blobs = set(ws.Blobs())
+ uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs]
+ for name, blob in zip(uninitialized, tensor_inputs):
+ ws.FeedBlob(name, blob)
+
+ try:
+ ws.RunNetOnce(predict_net)
+ except RuntimeError as e:
+ logger.warning("Encountered RuntimeError: \n{}".format(str(e)))
+
+ ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()}
+ blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)}
+
+ logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path))
+ save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes)
+
+ return ws_blobs
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_inference.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..deb886c0417285ed1d5ad85eb941fa1ac757cdab
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_inference.py
@@ -0,0 +1,161 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import logging
+import numpy as np
+from itertools import count
+import torch
+from caffe2.proto import caffe2_pb2
+from caffe2.python import core
+
+from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
+from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type
+
+logger = logging.getLogger(__name__)
+
+
+# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ======
+class ProtobufModel(torch.nn.Module):
+ """
+ Wrapper of a caffe2's protobuf model.
+ It works just like nn.Module, but running caffe2 under the hood.
+ Input/Output are tuple[tensor] that match the caffe2 net's external_input/output.
+ """
+
+ _ids = count(0)
+
+ def __init__(self, predict_net, init_net):
+ logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...")
+ super().__init__()
+ assert isinstance(predict_net, caffe2_pb2.NetDef)
+ assert isinstance(init_net, caffe2_pb2.NetDef)
+ # create unique temporary workspace for each instance
+ self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids))
+ self.net = core.Net(predict_net)
+
+ logger.info("Running init_net once to fill the parameters ...")
+ with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
+ ws.RunNetOnce(init_net)
+ uninitialized_external_input = []
+ for blob in self.net.Proto().external_input:
+ if blob not in ws.Blobs():
+ uninitialized_external_input.append(blob)
+ ws.CreateBlob(blob)
+ ws.CreateNet(self.net)
+
+ self._error_msgs = set()
+ self._input_blobs = uninitialized_external_input
+
+ def _infer_output_devices(self, inputs):
+ """
+ Returns:
+ list[str]: list of device for each external output
+ """
+
+ def _get_device_type(torch_tensor):
+ assert torch_tensor.device.type in ["cpu", "cuda"]
+ assert torch_tensor.device.index == 0
+ return torch_tensor.device.type
+
+ predict_net = self.net.Proto()
+ input_device_types = {
+ (name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs)
+ }
+ device_type_map = infer_device_type(
+ predict_net, known_status=input_device_types, device_name_style="pytorch"
+ )
+ ssa, versions = core.get_ssa(predict_net)
+ versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
+ output_devices = [device_type_map[outp] for outp in versioned_outputs]
+ return output_devices
+
+ def forward(self, inputs):
+ """
+ Args:
+ inputs (tuple[torch.Tensor])
+
+ Returns:
+ tuple[torch.Tensor]
+ """
+ assert len(inputs) == len(self._input_blobs), (
+ f"Length of inputs ({len(inputs)}) "
+ f"doesn't match the required input blobs: {self._input_blobs}"
+ )
+
+ with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
+ for b, tensor in zip(self._input_blobs, inputs):
+ ws.FeedBlob(b, tensor)
+
+ try:
+ ws.RunNet(self.net.Proto().name)
+ except RuntimeError as e:
+ if not str(e) in self._error_msgs:
+ self._error_msgs.add(str(e))
+ logger.warning("Encountered new RuntimeError: \n{}".format(str(e)))
+ logger.warning("Catch the error and use partial results.")
+
+ c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output]
+ # Remove outputs of current run, this is necessary in order to
+ # prevent fetching the result from previous run if the model fails
+ # in the middle.
+ for b in self.net.Proto().external_output:
+ # Needs to create uninitialized blob to make the net runable.
+ # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b),
+ # but there'no such API.
+ ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).")
+
+ # Cast output to torch.Tensor on the desired device
+ output_devices = (
+ self._infer_output_devices(inputs)
+ if any(t.device.type != "cpu" for t in inputs)
+ else ["cpu" for _ in self.net.Proto().external_output]
+ )
+
+ outputs = []
+ for name, c2_output, device in zip(
+ self.net.Proto().external_output, c2_outputs, output_devices
+ ):
+ if not isinstance(c2_output, np.ndarray):
+ raise RuntimeError(
+ "Invalid output for blob {}, received: {}".format(name, c2_output)
+ )
+ outputs.append(torch.tensor(c2_output).to(device=device))
+ return tuple(outputs)
+
+
+class ProtobufDetectionModel(torch.nn.Module):
+ """
+ A class works just like a pytorch meta arch in terms of inference, but running
+ caffe2 model under the hood.
+ """
+
+ def __init__(self, predict_net, init_net, *, convert_outputs=None):
+ """
+ Args:
+ predict_net, init_net (core.Net): caffe2 nets
+ convert_outptus (callable): a function that converts caffe2
+ outputs to the same format of the original pytorch model.
+ By default, use the one defined in the caffe2 meta_arch.
+ """
+ super().__init__()
+ self.protobuf_model = ProtobufModel(predict_net, init_net)
+ self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0)
+ self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii")
+
+ if convert_outputs is None:
+ meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN")
+ meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")]
+ self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
+ else:
+ self._convert_outputs = convert_outputs
+
+ def _convert_inputs(self, batched_inputs):
+ # currently all models convert inputs in the same way
+ return convert_batched_inputs_to_c2_format(
+ batched_inputs, self.size_divisibility, self.device
+ )
+
+ def forward(self, batched_inputs):
+ c2_inputs = self._convert_inputs(batched_inputs)
+ c2_results = self.protobuf_model(c2_inputs)
+ c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results))
+ return self._convert_outputs(batched_inputs, c2_inputs, c2_results)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_modeling.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..e00de4ad28fd81483c9e1161394b7b508fdad91f
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_modeling.py
@@ -0,0 +1,419 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import functools
+import io
+import struct
+import types
+import torch
+
+from detectron2.modeling import meta_arch
+from detectron2.modeling.box_regression import Box2BoxTransform
+from detectron2.modeling.roi_heads import keypoint_head
+from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
+
+from .c10 import Caffe2Compatible
+from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn
+from .shared import (
+ alias,
+ check_set_pb_arg,
+ get_pb_arg_floats,
+ get_pb_arg_valf,
+ get_pb_arg_vali,
+ get_pb_arg_vals,
+ mock_torch_nn_functional_interpolate,
+)
+
+
+def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False):
+ """
+ A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor])
+ to detectron2's format (i.e. list of Instances instance).
+ This only works when the model follows the Caffe2 detectron's naming convention.
+
+ Args:
+ image_sizes (List[List[int, int]]): [H, W] of every image.
+ tensor_outputs (Dict[str, Tensor]): external_output to its tensor.
+
+ force_mask_on (Bool): if true, the it make sure there'll be pred_masks even
+ if the mask is not found from tensor_outputs (usually due to model crash)
+ """
+
+ results = [Instances(image_size) for image_size in image_sizes]
+
+ batch_splits = tensor_outputs.get("batch_splits", None)
+ if batch_splits:
+ raise NotImplementedError()
+ assert len(image_sizes) == 1
+ result = results[0]
+
+ bbox_nms = tensor_outputs["bbox_nms"]
+ score_nms = tensor_outputs["score_nms"]
+ class_nms = tensor_outputs["class_nms"]
+ # Detection will always success because Conv support 0-batch
+ assert bbox_nms is not None
+ assert score_nms is not None
+ assert class_nms is not None
+ if bbox_nms.shape[1] == 5:
+ result.pred_boxes = RotatedBoxes(bbox_nms)
+ else:
+ result.pred_boxes = Boxes(bbox_nms)
+ result.scores = score_nms
+ result.pred_classes = class_nms.to(torch.int64)
+
+ mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None)
+ if mask_fcn_probs is not None:
+ # finish the mask pred
+ mask_probs_pred = mask_fcn_probs
+ num_masks = mask_probs_pred.shape[0]
+ class_pred = result.pred_classes
+ indices = torch.arange(num_masks, device=class_pred.device)
+ mask_probs_pred = mask_probs_pred[indices, class_pred][:, None]
+ result.pred_masks = mask_probs_pred
+ elif force_mask_on:
+ # NOTE: there's no way to know the height/width of mask here, it won't be
+ # used anyway when batch size is 0, so just set them to 0.
+ result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8)
+
+ keypoints_out = tensor_outputs.get("keypoints_out", None)
+ kps_score = tensor_outputs.get("kps_score", None)
+ if keypoints_out is not None:
+ # keypoints_out: [N, 4, #kypoints], where 4 is in order of (x, y, score, prob)
+ keypoints_tensor = keypoints_out
+ # NOTE: it's possible that prob is not calculated if "should_output_softmax"
+ # is set to False in HeatmapMaxKeypoint, so just using raw score, seems
+ # it doesn't affect mAP. TODO: check more carefully.
+ keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]]
+ result.pred_keypoints = keypoint_xyp
+ elif kps_score is not None:
+ # keypoint heatmap to sparse data structure
+ pred_keypoint_logits = kps_score
+ keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result])
+
+ return results
+
+
+def _cast_to_f32(f64):
+ return struct.unpack("f", struct.pack("f", f64))[0]
+
+
+def set_caffe2_compatible_tensor_mode(model, enable=True):
+ def _fn(m):
+ if isinstance(m, Caffe2Compatible):
+ m.tensor_mode = enable
+
+ model.apply(_fn)
+
+
+def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device):
+ """
+ See get_caffe2_inputs() below.
+ """
+ assert all(isinstance(x, dict) for x in batched_inputs)
+ assert all(x["image"].dim() == 3 for x in batched_inputs)
+
+ images = [x["image"] for x in batched_inputs]
+ images = ImageList.from_tensors(images, size_divisibility)
+
+ im_info = []
+ for input_per_image, image_size in zip(batched_inputs, images.image_sizes):
+ target_height = input_per_image.get("height", image_size[0])
+ target_width = input_per_image.get("width", image_size[1]) # noqa
+ # NOTE: The scale inside im_info is kept as convention and for providing
+ # post-processing information if further processing is needed. For
+ # current Caffe2 model definitions that don't include post-processing inside
+ # the model, this number is not used.
+ # NOTE: There can be a slight difference between width and height
+ # scales, using a single number can results in numerical difference
+ # compared with D2's post-processing.
+ scale = target_height / image_size[0]
+ im_info.append([image_size[0], image_size[1], scale])
+ im_info = torch.Tensor(im_info)
+
+ return images.tensor.to(device), im_info.to(device)
+
+
+class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module):
+ """
+ Base class for caffe2-compatible implementation of a meta architecture.
+ The forward is traceable and its traced graph can be converted to caffe2
+ graph through ONNX.
+ """
+
+ def __init__(self, cfg, torch_model):
+ """
+ Args:
+ cfg (CfgNode):
+ torch_model (nn.Module): the detectron2 model (meta_arch) to be
+ converted.
+ """
+ super().__init__()
+ self._wrapped_model = torch_model
+ self.eval()
+ set_caffe2_compatible_tensor_mode(self, True)
+
+ def get_caffe2_inputs(self, batched_inputs):
+ """
+ Convert pytorch-style structured inputs to caffe2-style inputs that
+ are tuples of tensors.
+
+ Args:
+ batched_inputs (list[dict]): inputs to a detectron2 model
+ in its standard format. Each dict has "image" (CHW tensor), and optionally
+ "height" and "width".
+
+ Returns:
+ tuple[Tensor]:
+ tuple of tensors that will be the inputs to the
+ :meth:`forward` method. For existing models, the first
+ is an NCHW tensor (padded and batched); the second is
+ a im_info Nx3 tensor, where the rows are
+ (height, width, unused legacy parameter)
+ """
+ return convert_batched_inputs_to_c2_format(
+ batched_inputs,
+ self._wrapped_model.backbone.size_divisibility,
+ self._wrapped_model.device,
+ )
+
+ def encode_additional_info(self, predict_net, init_net):
+ """
+ Save extra metadata that will be used by inference in the output protobuf.
+ """
+ pass
+
+ def forward(self, inputs):
+ """
+ Run the forward in caffe2-style. It has to use caffe2-compatible ops
+ and the method will be used for tracing.
+
+ Args:
+ inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`.
+ They will be the inputs of the converted caffe2 graph.
+
+ Returns:
+ tuple[Tensor]: output tensors. They will be the outputs of the
+ converted caffe2 graph.
+ """
+ raise NotImplementedError
+
+ def _caffe2_preprocess_image(self, inputs):
+ """
+ Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward.
+ It normalizes the input images, and the final caffe2 graph assumes the
+ inputs have been batched already.
+ """
+ data, im_info = inputs
+ data = alias(data, "data")
+ im_info = alias(im_info, "im_info")
+ mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std
+ normalized_data = (data - mean) / std
+ normalized_data = alias(normalized_data, "normalized_data")
+
+ # Pack (data, im_info) into ImageList which is recognized by self.inference.
+ images = ImageList(tensor=normalized_data, image_sizes=im_info)
+ return images
+
+ @staticmethod
+ def get_outputs_converter(predict_net, init_net):
+ """
+ Creates a function that converts outputs of the caffe2 model to
+ detectron2's standard format.
+ The function uses information in `predict_net` and `init_net` that are
+ available at inferene time. Therefore the function logic can be used in inference.
+
+ The returned function has the following signature:
+
+ def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs
+
+ Where
+
+ * batched_inputs (list[dict]): the original input format of the meta arch
+ * c2_inputs (tuple[Tensor]): the caffe2 inputs.
+ * c2_results (dict[str, Tensor]): the caffe2 output format,
+ corresponding to the outputs of the :meth:`forward` function.
+ * detectron2_outputs: the original output format of the meta arch.
+
+ This function can be used to compare the outputs of the original meta arch and
+ the converted caffe2 graph.
+
+ Returns:
+ callable: a callable of the above signature.
+ """
+ raise NotImplementedError
+
+
+class Caffe2GeneralizedRCNN(Caffe2MetaArch):
+ def __init__(self, cfg, torch_model):
+ assert isinstance(torch_model, meta_arch.GeneralizedRCNN)
+ torch_model = patch_generalized_rcnn(torch_model)
+ super().__init__(cfg, torch_model)
+
+ try:
+ use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT
+ except AttributeError:
+ use_heatmap_max_keypoint = False
+ self.roi_heads_patcher = ROIHeadsPatcher(
+ self._wrapped_model.roi_heads, use_heatmap_max_keypoint
+ )
+
+ def encode_additional_info(self, predict_net, init_net):
+ size_divisibility = self._wrapped_model.backbone.size_divisibility
+ check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
+ check_set_pb_arg(
+ predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
+ )
+ check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN")
+
+ @mock_torch_nn_functional_interpolate()
+ def forward(self, inputs):
+ if not self.tensor_mode:
+ return self._wrapped_model.inference(inputs)
+ images = self._caffe2_preprocess_image(inputs)
+ features = self._wrapped_model.backbone(images.tensor)
+ proposals, _ = self._wrapped_model.proposal_generator(images, features)
+ with self.roi_heads_patcher.mock_roi_heads():
+ detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals)
+ return tuple(detector_results[0].flatten())
+
+ @staticmethod
+ def get_outputs_converter(predict_net, init_net):
+ def f(batched_inputs, c2_inputs, c2_results):
+ _, im_info = c2_inputs
+ image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
+ results = assemble_rcnn_outputs_by_name(image_sizes, c2_results)
+ return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
+
+ return f
+
+
+class Caffe2RetinaNet(Caffe2MetaArch):
+ def __init__(self, cfg, torch_model):
+ assert isinstance(torch_model, meta_arch.RetinaNet)
+ super().__init__(cfg, torch_model)
+
+ @mock_torch_nn_functional_interpolate()
+ def forward(self, inputs):
+ assert self.tensor_mode
+ images = self._caffe2_preprocess_image(inputs)
+
+ # explicitly return the images sizes to avoid removing "im_info" by ONNX
+ # since it's not used in the forward path
+ return_tensors = [images.image_sizes]
+
+ features = self._wrapped_model.backbone(images.tensor)
+ features = [features[f] for f in self._wrapped_model.head_in_features]
+ for i, feature_i in enumerate(features):
+ features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True)
+ return_tensors.append(features[i])
+
+ pred_logits, pred_anchor_deltas = self._wrapped_model.head(features)
+ for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)):
+ return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i)))
+ return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i)))
+
+ return tuple(return_tensors)
+
+ def encode_additional_info(self, predict_net, init_net):
+ size_divisibility = self._wrapped_model.backbone.size_divisibility
+ check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
+ check_set_pb_arg(
+ predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
+ )
+ check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet")
+
+ # Inference parameters:
+ check_set_pb_arg(
+ predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh)
+ )
+ check_set_pb_arg(
+ predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates
+ )
+ check_set_pb_arg(
+ predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh)
+ )
+ check_set_pb_arg(
+ predict_net,
+ "max_detections_per_image",
+ "i",
+ self._wrapped_model.max_detections_per_image,
+ )
+
+ check_set_pb_arg(
+ predict_net,
+ "bbox_reg_weights",
+ "floats",
+ [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights],
+ )
+ self._encode_anchor_generator_cfg(predict_net)
+
+ def _encode_anchor_generator_cfg(self, predict_net):
+ # serialize anchor_generator for future use
+ serialized_anchor_generator = io.BytesIO()
+ torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator)
+ # Ideally we can put anchor generating inside the model, then we don't
+ # need to store this information.
+ bytes = serialized_anchor_generator.getvalue()
+ check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes)
+
+ @staticmethod
+ def get_outputs_converter(predict_net, init_net):
+ self = types.SimpleNamespace()
+ serialized_anchor_generator = io.BytesIO(
+ get_pb_arg_vals(predict_net, "serialized_anchor_generator", None)
+ )
+ self.anchor_generator = torch.load(serialized_anchor_generator)
+ bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None)
+ self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights))
+ self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None)
+ self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None)
+ self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None)
+ self.max_detections_per_image = get_pb_arg_vali(
+ predict_net, "max_detections_per_image", None
+ )
+
+ # hack to reuse inference code from RetinaNet
+ for meth in [
+ "forward_inference",
+ "inference_single_image",
+ "_transpose_dense_predictions",
+ "_decode_multi_level_predictions",
+ "_decode_per_level_predictions",
+ ]:
+ setattr(self, meth, functools.partial(getattr(meta_arch.RetinaNet, meth), self))
+
+ def f(batched_inputs, c2_inputs, c2_results):
+ _, im_info = c2_inputs
+ image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
+ dummy_images = ImageList(
+ torch.randn(
+ (
+ len(im_info),
+ 3,
+ )
+ + tuple(image_sizes[0])
+ ),
+ image_sizes,
+ )
+
+ num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")])
+ pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)]
+ pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)]
+
+ # For each feature level, feature should have the same batch size and
+ # spatial dimension as the box_cls and box_delta.
+ dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits]
+ # self.num_classess can be inferred
+ self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4)
+
+ results = self.forward_inference(
+ dummy_images, dummy_features, [pred_logits, pred_anchor_deltas]
+ )
+ return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
+
+ return f
+
+
+META_ARCH_CAFFE2_EXPORT_TYPE_MAP = {
+ "GeneralizedRCNN": Caffe2GeneralizedRCNN,
+ "RetinaNet": Caffe2RetinaNet,
+}
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_patch.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9eee594a27cdec29ce5f2b6f7730171eda3805e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/caffe2_patch.py
@@ -0,0 +1,152 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import contextlib
+from unittest import mock
+import torch
+
+from detectron2.modeling import poolers
+from detectron2.modeling.proposal_generator import rpn
+from detectron2.modeling.roi_heads import keypoint_head, mask_head
+from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
+
+from .c10 import (
+ Caffe2Compatible,
+ Caffe2FastRCNNOutputsInference,
+ Caffe2KeypointRCNNInference,
+ Caffe2MaskRCNNInference,
+ Caffe2ROIPooler,
+ Caffe2RPN,
+)
+
+
+class GenericMixin(object):
+ pass
+
+
+class Caffe2CompatibleConverter(object):
+ """
+ A GenericUpdater which implements the `create_from` interface, by modifying
+ module object and assign it with another class replaceCls.
+ """
+
+ def __init__(self, replaceCls):
+ self.replaceCls = replaceCls
+
+ def create_from(self, module):
+ # update module's class to the new class
+ assert isinstance(module, torch.nn.Module)
+ if issubclass(self.replaceCls, GenericMixin):
+ # replaceCls should act as mixin, create a new class on-the-fly
+ new_class = type(
+ "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__),
+ (self.replaceCls, module.__class__),
+ {}, # {"new_method": lambda self: ...},
+ )
+ module.__class__ = new_class
+ else:
+ # replaceCls is complete class, this allow arbitrary class swap
+ module.__class__ = self.replaceCls
+
+ # initialize Caffe2Compatible
+ if isinstance(module, Caffe2Compatible):
+ module.tensor_mode = False
+
+ return module
+
+
+def patch(model, target, updater, *args, **kwargs):
+ """
+ recursively (post-order) update all modules with the target type and its
+ subclasses, make a initialization/composition/inheritance/... via the
+ updater.create_from.
+ """
+ for name, module in model.named_children():
+ model._modules[name] = patch(module, target, updater, *args, **kwargs)
+ if isinstance(model, target):
+ return updater.create_from(model, *args, **kwargs)
+ return model
+
+
+def patch_generalized_rcnn(model):
+ ccc = Caffe2CompatibleConverter
+ model = patch(model, rpn.RPN, ccc(Caffe2RPN))
+ model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler))
+
+ return model
+
+
+@contextlib.contextmanager
+def mock_fastrcnn_outputs_inference(
+ tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers
+):
+ with mock.patch.object(
+ box_predictor_type,
+ "inference",
+ autospec=True,
+ side_effect=Caffe2FastRCNNOutputsInference(tensor_mode),
+ ) as mocked_func:
+ yield
+ if check:
+ assert mocked_func.call_count > 0
+
+
+@contextlib.contextmanager
+def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True):
+ with mock.patch(
+ "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference()
+ ) as mocked_func:
+ yield
+ if check:
+ assert mocked_func.call_count > 0
+
+
+@contextlib.contextmanager
+def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True):
+ with mock.patch(
+ "{}.keypoint_rcnn_inference".format(patched_module),
+ side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint),
+ ) as mocked_func:
+ yield
+ if check:
+ assert mocked_func.call_count > 0
+
+
+class ROIHeadsPatcher:
+ def __init__(self, heads, use_heatmap_max_keypoint):
+ self.heads = heads
+ self.use_heatmap_max_keypoint = use_heatmap_max_keypoint
+
+ @contextlib.contextmanager
+ def mock_roi_heads(self, tensor_mode=True):
+ """
+ Patching several inference functions inside ROIHeads and its subclasses
+
+ Args:
+ tensor_mode (bool): whether the inputs/outputs are caffe2's tensor
+ format or not. Default to True.
+ """
+ # NOTE: this requries the `keypoint_rcnn_inference` and `mask_rcnn_inference`
+ # are called inside the same file as BaseXxxHead due to using mock.patch.
+ kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__
+ mask_head_mod = mask_head.BaseMaskRCNNHead.__module__
+
+ mock_ctx_managers = [
+ mock_fastrcnn_outputs_inference(
+ tensor_mode=tensor_mode,
+ check=True,
+ box_predictor_type=type(self.heads.box_predictor),
+ )
+ ]
+ if getattr(self.heads, "keypoint_on", False):
+ mock_ctx_managers += [
+ mock_keypoint_rcnn_inference(
+ tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint
+ )
+ ]
+ if getattr(self.heads, "mask_on", False):
+ mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)]
+
+ with contextlib.ExitStack() as stack: # python 3.3+
+ for mgr in mock_ctx_managers:
+ stack.enter_context(mgr)
+ yield
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/flatten.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/flatten.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5ba4297567d650f147eebeed361e9d62fab899d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/flatten.py
@@ -0,0 +1,330 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import collections
+from dataclasses import dataclass
+from typing import Callable, List, Optional, Tuple
+import torch
+from torch import nn
+
+from detectron2.structures import Boxes, Instances, ROIMasks
+from detectron2.utils.registry import _convert_target_to_string, locate
+
+from .torchscript_patch import patch_builtin_len
+
+
+@dataclass
+class Schema:
+ """
+ A Schema defines how to flatten a possibly hierarchical object into tuple of
+ primitive objects, so it can be used as inputs/outputs of PyTorch's tracing.
+
+ PyTorch does not support tracing a function that produces rich output
+ structures (e.g. dict, Instances, Boxes). To trace such a function, we
+ flatten the rich object into tuple of tensors, and return this tuple of tensors
+ instead. Meanwhile, we also need to know how to "rebuild" the original object
+ from the flattened results, so we can evaluate the flattened results.
+ A Schema defines how to flatten an object, and while flattening it, it records
+ necessary schemas so that the object can be rebuilt using the flattened outputs.
+
+ The flattened object and the schema object is returned by ``.flatten`` classmethod.
+ Then the original object can be rebuilt with the ``__call__`` method of schema.
+
+ A Schema is a dataclass that can be serialized easily.
+ """
+
+ # inspired by FetchMapper in tensorflow/python/client/session.py
+
+ @classmethod
+ def flatten(cls, obj):
+ raise NotImplementedError
+
+ def __call__(self, values):
+ raise NotImplementedError
+
+ @staticmethod
+ def _concat(values):
+ ret = ()
+ sizes = []
+ for v in values:
+ assert isinstance(v, tuple), "Flattened results must be a tuple"
+ ret = ret + v
+ sizes.append(len(v))
+ return ret, sizes
+
+ @staticmethod
+ def _split(values, sizes):
+ if len(sizes):
+ expected_len = sum(sizes)
+ assert (
+ len(values) == expected_len
+ ), f"Values has length {len(values)} but expect length {expected_len}."
+ ret = []
+ for k in range(len(sizes)):
+ begin, end = sum(sizes[:k]), sum(sizes[: k + 1])
+ ret.append(values[begin:end])
+ return ret
+
+
+@dataclass
+class ListSchema(Schema):
+ schemas: List[Schema] # the schemas that define how to flatten each element in the list
+ sizes: List[int] # the flattened length of each element
+
+ def __call__(self, values):
+ values = self._split(values, self.sizes)
+ if len(values) != len(self.schemas):
+ raise ValueError(
+ f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!"
+ )
+ values = [m(v) for m, v in zip(self.schemas, values)]
+ return list(values)
+
+ @classmethod
+ def flatten(cls, obj):
+ res = [flatten_to_tuple(k) for k in obj]
+ values, sizes = cls._concat([k[0] for k in res])
+ return values, cls([k[1] for k in res], sizes)
+
+
+@dataclass
+class TupleSchema(ListSchema):
+ def __call__(self, values):
+ return tuple(super().__call__(values))
+
+
+@dataclass
+class IdentitySchema(Schema):
+ def __call__(self, values):
+ return values[0]
+
+ @classmethod
+ def flatten(cls, obj):
+ return (obj,), cls()
+
+
+@dataclass
+class DictSchema(ListSchema):
+ keys: List[str]
+
+ def __call__(self, values):
+ values = super().__call__(values)
+ return dict(zip(self.keys, values))
+
+ @classmethod
+ def flatten(cls, obj):
+ for k in obj.keys():
+ if not isinstance(k, str):
+ raise KeyError("Only support flattening dictionaries if keys are str.")
+ keys = sorted(obj.keys())
+ values = [obj[k] for k in keys]
+ ret, schema = ListSchema.flatten(values)
+ return ret, cls(schema.schemas, schema.sizes, keys)
+
+
+@dataclass
+class InstancesSchema(DictSchema):
+ def __call__(self, values):
+ image_size, fields = values[-1], values[:-1]
+ fields = super().__call__(fields)
+ return Instances(image_size, **fields)
+
+ @classmethod
+ def flatten(cls, obj):
+ ret, schema = super().flatten(obj.get_fields())
+ size = obj.image_size
+ if not isinstance(size, torch.Tensor):
+ size = torch.tensor(size)
+ return ret + (size,), schema
+
+
+@dataclass
+class TensorWrapSchema(Schema):
+ """
+ For classes that are simple wrapper of tensors, e.g.
+ Boxes, RotatedBoxes, BitMasks
+ """
+
+ class_name: str
+
+ def __call__(self, values):
+ return locate(self.class_name)(values[0])
+
+ @classmethod
+ def flatten(cls, obj):
+ return (obj.tensor,), cls(_convert_target_to_string(type(obj)))
+
+
+# if more custom structures needed in the future, can allow
+# passing in extra schemas for custom types
+def flatten_to_tuple(obj):
+ """
+ Flatten an object so it can be used for PyTorch tracing.
+ Also returns how to rebuild the original object from the flattened outputs.
+
+ Returns:
+ res (tuple): the flattened results that can be used as tracing outputs
+ schema: an object with a ``__call__`` method such that ``schema(res) == obj``.
+ It is a pure dataclass that can be serialized.
+ """
+ schemas = [
+ ((str, bytes), IdentitySchema),
+ (list, ListSchema),
+ (tuple, TupleSchema),
+ (collections.abc.Mapping, DictSchema),
+ (Instances, InstancesSchema),
+ ((Boxes, ROIMasks), TensorWrapSchema),
+ ]
+ for klass, schema in schemas:
+ if isinstance(obj, klass):
+ F = schema
+ break
+ else:
+ F = IdentitySchema
+
+ return F.flatten(obj)
+
+
+class TracingAdapter(nn.Module):
+ """
+ A model may take rich input/output format (e.g. dict or custom classes),
+ but `torch.jit.trace` requires tuple of tensors as input/output.
+ This adapter flattens input/output format of a model so it becomes traceable.
+
+ It also records the necessary schema to rebuild model's inputs/outputs from flattened
+ inputs/outputs.
+
+ Example:
+ ::
+ outputs = model(inputs) # inputs/outputs may be rich structure
+ adapter = TracingAdapter(model, inputs)
+
+ # can now trace the model, with adapter.flattened_inputs, or another
+ # tuple of tensors with the same length and meaning
+ traced = torch.jit.trace(adapter, adapter.flattened_inputs)
+
+ # traced model can only produce flattened outputs (tuple of tensors)
+ flattened_outputs = traced(*adapter.flattened_inputs)
+ # adapter knows the schema to convert it back (new_outputs == outputs)
+ new_outputs = adapter.outputs_schema(flattened_outputs)
+ """
+
+ flattened_inputs: Tuple[torch.Tensor] = None
+ """
+ Flattened version of inputs given to this class's constructor.
+ """
+
+ inputs_schema: Schema = None
+ """
+ Schema of the inputs given to this class's constructor.
+ """
+
+ outputs_schema: Schema = None
+ """
+ Schema of the output produced by calling the given model with inputs.
+ """
+
+ def __init__(
+ self,
+ model: nn.Module,
+ inputs,
+ inference_func: Optional[Callable] = None,
+ allow_non_tensor: bool = False,
+ ):
+ """
+ Args:
+ model: an nn.Module
+ inputs: An input argument or a tuple of input arguments used to call model.
+ After flattening, it has to only consist of tensors.
+ inference_func: a callable that takes (model, *inputs), calls the
+ model with inputs, and return outputs. By default it
+ is ``lambda model, *inputs: model(*inputs)``. Can be override
+ if you need to call the model differently.
+ allow_non_tensor: allow inputs/outputs to contain non-tensor objects.
+ This option will filter out non-tensor objects to make the
+ model traceable, but ``inputs_schema``/``outputs_schema`` cannot be
+ used anymore because inputs/outputs cannot be rebuilt from pure tensors.
+ This is useful when you're only interested in the single trace of
+ execution (e.g. for flop count), but not interested in
+ generalizing the traced graph to new inputs.
+ """
+ super().__init__()
+ if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
+ model = model.module
+ self.model = model
+ if not isinstance(inputs, tuple):
+ inputs = (inputs,)
+ self.inputs = inputs
+ self.allow_non_tensor = allow_non_tensor
+
+ if inference_func is None:
+ inference_func = lambda model, *inputs: model(*inputs) # noqa
+ self.inference_func = inference_func
+
+ self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs)
+
+ if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs):
+ return
+ if self.allow_non_tensor:
+ self.flattened_inputs = tuple(
+ [x for x in self.flattened_inputs if isinstance(x, torch.Tensor)]
+ )
+ self.inputs_schema = None
+ else:
+ for input in self.flattened_inputs:
+ if not isinstance(input, torch.Tensor):
+ raise ValueError(
+ "Inputs for tracing must only contain tensors. "
+ f"Got a {type(input)} instead."
+ )
+
+ def forward(self, *args: torch.Tensor):
+ with torch.no_grad(), patch_builtin_len():
+ if self.inputs_schema is not None:
+ inputs_orig_format = self.inputs_schema(args)
+ else:
+ if len(args) != len(self.flattened_inputs) or any(
+ x is not y for x, y in zip(args, self.flattened_inputs)
+ ):
+ raise ValueError(
+ "TracingAdapter does not contain valid inputs_schema."
+ " So it cannot generalize to other inputs and must be"
+ " traced with `.flattened_inputs`."
+ )
+ inputs_orig_format = self.inputs
+
+ outputs = self.inference_func(self.model, *inputs_orig_format)
+ flattened_outputs, schema = flatten_to_tuple(outputs)
+
+ flattened_output_tensors = tuple(
+ [x for x in flattened_outputs if isinstance(x, torch.Tensor)]
+ )
+ if len(flattened_output_tensors) < len(flattened_outputs):
+ if self.allow_non_tensor:
+ flattened_outputs = flattened_output_tensors
+ self.outputs_schema = None
+ else:
+ raise ValueError(
+ "Model cannot be traced because some model outputs "
+ "cannot flatten to tensors."
+ )
+ else: # schema is valid
+ if self.outputs_schema is None:
+ self.outputs_schema = schema
+ else:
+ assert self.outputs_schema == schema, (
+ "Model should always return outputs with the same "
+ "structure so it can be traced!"
+ )
+ return flattened_outputs
+
+ def _create_wrapper(self, traced_model):
+ """
+ Return a function that has an input/output interface the same as the
+ original model, but it calls the given traced model under the hood.
+ """
+
+ def forward(*args):
+ flattened_inputs, _ = flatten_to_tuple(args)
+ flattened_outputs = traced_model(*flattened_inputs)
+ return self.outputs_schema(flattened_outputs)
+
+ return forward
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/shared.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/shared.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d0f7bf3999064a68f28a1207d65a2de7ae98c0a
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/shared.py
@@ -0,0 +1,1034 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import collections
+import contextlib
+import copy
+import functools
+import logging
+import numpy as np
+import os
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from unittest import mock
+import caffe2.python.utils as putils
+import torch
+import torch.nn.functional as F
+from caffe2.proto import caffe2_pb2
+from caffe2.python import core, net_drawer, workspace
+from torch.nn.functional import interpolate as interp
+
+logger = logging.getLogger(__name__)
+
+
+# ==== torch/utils_toffee/cast.py =======================================
+
+
+def to_device(t, device_str):
+ """
+ This function is a replacement of .to(another_device) such that it allows the
+ casting to be traced properly by explicitly calling the underlying copy ops.
+ It also avoids introducing unncessary op when casting to the same device.
+ """
+ src = t.device
+ dst = torch.device(device_str)
+
+ if src == dst:
+ return t
+ elif src.type == "cuda" and dst.type == "cpu":
+ return torch.ops._caffe2.CopyGPUToCPU(t)
+ elif src.type == "cpu" and dst.type == "cuda":
+ return torch.ops._caffe2.CopyCPUToGPU(t)
+ else:
+ raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst))
+
+
+# ==== torch/utils_toffee/interpolate.py =======================================
+
+
+# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py
+def BilinearInterpolation(tensor_in, up_scale):
+ assert up_scale % 2 == 0, "Scale should be even"
+
+ def upsample_filt(size):
+ factor = (size + 1) // 2
+ if size % 2 == 1:
+ center = factor - 1
+ else:
+ center = factor - 0.5
+
+ og = np.ogrid[:size, :size]
+ return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
+
+ kernel_size = int(up_scale) * 2
+ bil_filt = upsample_filt(kernel_size)
+
+ dim = int(tensor_in.shape[1])
+ kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32)
+ kernel[range(dim), range(dim), :, :] = bil_filt
+
+ tensor_out = F.conv_transpose2d(
+ tensor_in,
+ weight=to_device(torch.Tensor(kernel), tensor_in.device),
+ bias=None,
+ stride=int(up_scale),
+ padding=int(up_scale / 2),
+ )
+
+ return tensor_out
+
+
+# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if
+# using dynamic `scale_factor` rather than static `size`. (T43166860)
+# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly.
+def onnx_compatibale_interpolate(
+ input, size=None, scale_factor=None, mode="nearest", align_corners=None
+):
+ # NOTE: The input dimensions are interpreted in the form:
+ # `mini-batch x channels x [optional depth] x [optional height] x width`.
+ if size is None and scale_factor is not None:
+ if input.dim() == 4:
+ if isinstance(scale_factor, (int, float)):
+ height_scale, width_scale = (scale_factor, scale_factor)
+ else:
+ assert isinstance(scale_factor, (tuple, list))
+ assert len(scale_factor) == 2
+ height_scale, width_scale = scale_factor
+
+ assert not align_corners, "No matching C2 op for align_corners == True"
+ if mode == "nearest":
+ return torch.ops._caffe2.ResizeNearest(
+ input, order="NCHW", width_scale=width_scale, height_scale=height_scale
+ )
+ elif mode == "bilinear":
+ logger.warning(
+ "Use F.conv_transpose2d for bilinear interpolate"
+ " because there's no such C2 op, this may cause significant"
+ " slowdown and the boundary pixels won't be as same as"
+ " using F.interpolate due to padding."
+ )
+ assert height_scale == width_scale
+ return BilinearInterpolation(input, up_scale=height_scale)
+ logger.warning("Output size is not static, it might cause ONNX conversion issue")
+
+ return interp(input, size, scale_factor, mode, align_corners)
+
+
+@contextlib.contextmanager
+def mock_torch_nn_functional_interpolate():
+ if torch.onnx.is_in_onnx_export():
+ with mock.patch(
+ "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate
+ ):
+ yield
+ else:
+ yield
+
+
+# ==== torch/utils_caffe2/ws_utils.py ==========================================
+
+
+class ScopedWS(object):
+ def __init__(self, ws_name, is_reset, is_cleanup=False):
+ self.ws_name = ws_name
+ self.is_reset = is_reset
+ self.is_cleanup = is_cleanup
+ self.org_ws = ""
+
+ def __enter__(self):
+ self.org_ws = workspace.CurrentWorkspace()
+ if self.ws_name is not None:
+ workspace.SwitchWorkspace(self.ws_name, True)
+ if self.is_reset:
+ workspace.ResetWorkspace()
+
+ return workspace
+
+ def __exit__(self, *args):
+ if self.is_cleanup:
+ workspace.ResetWorkspace()
+ if self.ws_name is not None:
+ workspace.SwitchWorkspace(self.org_ws)
+
+
+def fetch_any_blob(name):
+ bb = None
+ try:
+ bb = workspace.FetchBlob(name)
+ except TypeError:
+ bb = workspace.FetchInt8Blob(name)
+ except Exception as e:
+ logger.error("Get blob {} error: {}".format(name, e))
+
+ return bb
+
+
+# ==== torch/utils_caffe2/protobuf.py ==========================================
+
+
+def get_pb_arg(pb, arg_name):
+ for x in pb.arg:
+ if x.name == arg_name:
+ return x
+ return None
+
+
+def get_pb_arg_valf(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return arg.f if arg is not None else default_val
+
+
+def get_pb_arg_floats(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return list(map(float, arg.floats)) if arg is not None else default_val
+
+
+def get_pb_arg_ints(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return list(map(int, arg.ints)) if arg is not None else default_val
+
+
+def get_pb_arg_vali(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return arg.i if arg is not None else default_val
+
+
+def get_pb_arg_vals(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return arg.s if arg is not None else default_val
+
+
+def get_pb_arg_valstrings(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return list(arg.strings) if arg is not None else default_val
+
+
+def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False):
+ arg = get_pb_arg(pb, arg_name)
+ if arg is None:
+ arg = putils.MakeArgument(arg_name, arg_value)
+ assert hasattr(arg, arg_attr)
+ pb.arg.extend([arg])
+ if allow_override and getattr(arg, arg_attr) != arg_value:
+ logger.warning(
+ "Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value)
+ )
+ setattr(arg, arg_attr, arg_value)
+ else:
+ assert arg is not None
+ assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format(
+ getattr(arg, arg_attr), arg_value
+ )
+
+
+def _create_const_fill_op_from_numpy(name, tensor, device_option=None):
+ assert type(tensor) == np.ndarray
+ kTypeNameMapper = {
+ np.dtype("float32"): "GivenTensorFill",
+ np.dtype("int32"): "GivenTensorIntFill",
+ np.dtype("int64"): "GivenTensorInt64Fill",
+ np.dtype("uint8"): "GivenTensorStringFill",
+ }
+
+ args_dict = {}
+ if tensor.dtype == np.dtype("uint8"):
+ args_dict.update({"values": [str(tensor.data)], "shape": [1]})
+ else:
+ args_dict.update({"values": tensor, "shape": tensor.shape})
+
+ if device_option is not None:
+ args_dict["device_option"] = device_option
+
+ return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict)
+
+
+def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor):
+ assert type(int8_tensor) == workspace.Int8Tensor
+ kTypeNameMapper = {
+ np.dtype("int32"): "Int8GivenIntTensorFill",
+ np.dtype("uint8"): "Int8GivenTensorFill",
+ }
+
+ tensor = int8_tensor.data
+ assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")]
+ values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor
+
+ return core.CreateOperator(
+ kTypeNameMapper[tensor.dtype],
+ [],
+ [name],
+ values=values,
+ shape=tensor.shape,
+ Y_scale=int8_tensor.scale,
+ Y_zero_point=int8_tensor.zero_point,
+ )
+
+
+def create_const_fill_op(
+ name: str,
+ blob: Union[np.ndarray, workspace.Int8Tensor],
+ device_option: Optional[caffe2_pb2.DeviceOption] = None,
+) -> caffe2_pb2.OperatorDef:
+ """
+ Given a blob object, return the Caffe2 operator that creates this blob
+ as constant. Currently support NumPy tensor and Caffe2 Int8Tensor.
+ """
+
+ tensor_type = type(blob)
+ assert tensor_type in [
+ np.ndarray,
+ workspace.Int8Tensor,
+ ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
+ name, type(blob)
+ )
+
+ if tensor_type == np.ndarray:
+ return _create_const_fill_op_from_numpy(name, blob, device_option)
+ elif tensor_type == workspace.Int8Tensor:
+ assert device_option is None
+ return _create_const_fill_op_from_c2_int8_tensor(name, blob)
+
+
+def construct_init_net_from_params(
+ params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None
+) -> caffe2_pb2.NetDef:
+ """
+ Construct the init_net from params dictionary
+ """
+ init_net = caffe2_pb2.NetDef()
+ device_options = device_options or {}
+ for name, blob in params.items():
+ if isinstance(blob, str):
+ logger.warning(
+ (
+ "Blob {} with type {} is not supported in generating init net,"
+ " skipped.".format(name, type(blob))
+ )
+ )
+ continue
+ init_net.op.extend(
+ [create_const_fill_op(name, blob, device_option=device_options.get(name, None))]
+ )
+ init_net.external_output.append(name)
+ return init_net
+
+
+def get_producer_map(ssa):
+ """
+ Return dict from versioned blob to (i, j),
+ where i is index of producer op, j is the index of output of that op.
+ """
+ producer_map = {}
+ for i in range(len(ssa)):
+ outputs = ssa[i][1]
+ for j, outp in enumerate(outputs):
+ producer_map[outp] = (i, j)
+ return producer_map
+
+
+def get_consumer_map(ssa):
+ """
+ Return dict from versioned blob to list of (i, j),
+ where i is index of consumer op, j is the index of input of that op.
+ """
+ consumer_map = collections.defaultdict(list)
+ for i in range(len(ssa)):
+ inputs = ssa[i][0]
+ for j, inp in enumerate(inputs):
+ consumer_map[inp].append((i, j))
+ return consumer_map
+
+
+def get_params_from_init_net(
+ init_net: caffe2_pb2.NetDef,
+) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]:
+ """
+ Take the output blobs from init_net by running it.
+ Outputs:
+ params: dict from blob name to numpy array
+ device_options: dict from blob name to the device option of its creating op
+ """
+ # NOTE: this assumes that the params is determined by producer op with the
+ # only exception be CopyGPUToCPU which is CUDA op but returns CPU tensor.
+ def _get_device_option(producer_op):
+ if producer_op.type == "CopyGPUToCPU":
+ return caffe2_pb2.DeviceOption()
+ else:
+ return producer_op.device_option
+
+ with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws:
+ ws.RunNetOnce(init_net)
+ params = {b: fetch_any_blob(b) for b in init_net.external_output}
+ ssa, versions = core.get_ssa(init_net)
+ producer_map = get_producer_map(ssa)
+ device_options = {
+ b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
+ for b in init_net.external_output
+ }
+ return params, device_options
+
+
+def _updater_raise(op, input_types, output_types):
+ raise RuntimeError(
+ "Failed to apply updater for op {} given input_types {} and"
+ " output_types {}".format(op, input_types, output_types)
+ )
+
+
+def _generic_status_identifier(
+ predict_net: caffe2_pb2.NetDef,
+ status_updater: Callable,
+ known_status: Dict[Tuple[str, int], Any],
+) -> Dict[Tuple[str, int], Any]:
+ """
+ Statically infer the status of each blob, the status can be such as device type
+ (CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here
+ is versioned blob (Tuple[str, int]) in the format compatible with ssa.
+ Inputs:
+ predict_net: the caffe2 network
+ status_updater: a callable, given an op and the status of its input/output,
+ it returns the updated status of input/output. `None` is used for
+ representing unknown status.
+ known_status: a dict containing known status, used as initialization.
+ Outputs:
+ A dict mapping from versioned blob to its status
+ """
+ ssa, versions = core.get_ssa(predict_net)
+ versioned_ext_input = [(b, 0) for b in predict_net.external_input]
+ versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output]
+ all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa])
+
+ allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output)
+ assert all(k in allowed_vbs for k in known_status)
+ assert all(v is not None for v in known_status.values())
+ _known_status = copy.deepcopy(known_status)
+
+ def _check_and_update(key, value):
+ assert value is not None
+ if key in _known_status:
+ if not _known_status[key] == value:
+ raise RuntimeError(
+ "Confilict status for {}, existing status {}, new status {}".format(
+ key, _known_status[key], value
+ )
+ )
+ _known_status[key] = value
+
+ def _update_i(op, ssa_i):
+ versioned_inputs = ssa_i[0]
+ versioned_outputs = ssa_i[1]
+
+ inputs_status = [_known_status.get(b, None) for b in versioned_inputs]
+ outputs_status = [_known_status.get(b, None) for b in versioned_outputs]
+
+ new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status)
+
+ for versioned_blob, status in zip(
+ versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status
+ ):
+ if status is not None:
+ _check_and_update(versioned_blob, status)
+
+ for op, ssa_i in zip(predict_net.op, ssa):
+ _update_i(op, ssa_i)
+ for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)):
+ _update_i(op, ssa_i)
+
+ # NOTE: This strictly checks all the blob from predict_net must be assgined
+ # a known status. However sometimes it's impossible (eg. having deadend op),
+ # we may relax this constraint if
+ for k in all_versioned_blobs:
+ if k not in _known_status:
+ raise NotImplementedError(
+ "Can not infer the status for {}. Currently only support the case where"
+ " a single forward and backward pass can identify status for all blobs.".format(k)
+ )
+
+ return _known_status
+
+
+def infer_device_type(
+ predict_net: caffe2_pb2.NetDef,
+ known_status: Dict[Tuple[str, int], Any],
+ device_name_style: str = "caffe2",
+) -> Dict[Tuple[str, int], str]:
+ """Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob"""
+
+ assert device_name_style in ["caffe2", "pytorch"]
+ _CPU_STR = "cpu"
+ _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda"
+
+ def _copy_cpu_to_gpu_updater(op, input_types, output_types):
+ if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR:
+ _updater_raise(op, input_types, output_types)
+ return ([_CPU_STR], [_GPU_STR])
+
+ def _copy_gpu_to_cpu_updater(op, input_types, output_types):
+ if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR:
+ _updater_raise(op, input_types, output_types)
+ return ([_GPU_STR], [_CPU_STR])
+
+ def _other_ops_updater(op, input_types, output_types):
+ non_none_types = [x for x in input_types + output_types if x is not None]
+ if len(non_none_types) > 0:
+ the_type = non_none_types[0]
+ if not all(x == the_type for x in non_none_types):
+ _updater_raise(op, input_types, output_types)
+ else:
+ the_type = None
+ return ([the_type for _ in op.input], [the_type for _ in op.output])
+
+ def _device_updater(op, *args, **kwargs):
+ return {
+ "CopyCPUToGPU": _copy_cpu_to_gpu_updater,
+ "CopyGPUToCPU": _copy_gpu_to_cpu_updater,
+ }.get(op.type, _other_ops_updater)(op, *args, **kwargs)
+
+ return _generic_status_identifier(predict_net, _device_updater, known_status)
+
+
+# ==== torch/utils_caffe2/vis.py ===============================================
+
+
+def _modify_blob_names(ops, blob_rename_f):
+ ret = []
+
+ def _replace_list(blob_list, replaced_list):
+ del blob_list[:]
+ blob_list.extend(replaced_list)
+
+ for x in ops:
+ cur = copy.deepcopy(x)
+ _replace_list(cur.input, list(map(blob_rename_f, cur.input)))
+ _replace_list(cur.output, list(map(blob_rename_f, cur.output)))
+ ret.append(cur)
+
+ return ret
+
+
+def _rename_blob(name, blob_sizes, blob_ranges):
+ def _list_to_str(bsize):
+ ret = ", ".join([str(x) for x in bsize])
+ ret = "[" + ret + "]"
+ return ret
+
+ ret = name
+ if blob_sizes is not None and name in blob_sizes:
+ ret += "\n" + _list_to_str(blob_sizes[name])
+ if blob_ranges is not None and name in blob_ranges:
+ ret += "\n" + _list_to_str(blob_ranges[name])
+
+ return ret
+
+
+# graph_name could not contain word 'graph'
+def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None):
+ blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges)
+ return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f)
+
+
+def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None):
+ graph = None
+ ops = net.op
+ if blob_rename_func is not None:
+ ops = _modify_blob_names(ops, blob_rename_func)
+ if not op_only:
+ graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB")
+ else:
+ graph = net_drawer.GetPydotGraphMinimal(
+ ops, graph_name, rankdir="TB", minimal_dependency=True
+ )
+
+ try:
+ par_dir = os.path.dirname(file_name)
+ if not os.path.exists(par_dir):
+ os.makedirs(par_dir)
+
+ format = os.path.splitext(os.path.basename(file_name))[-1]
+ if format == ".png":
+ graph.write_png(file_name)
+ elif format == ".pdf":
+ graph.write_pdf(file_name)
+ elif format == ".svg":
+ graph.write_svg(file_name)
+ else:
+ print("Incorrect format {}".format(format))
+ except Exception as e:
+ print("Error when writing graph to image {}".format(e))
+
+ return graph
+
+
+# ==== torch/utils_toffee/aten_to_caffe2.py ====================================
+
+
+def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef):
+ """
+ For ONNX exported model, GroupNorm will be represented as ATen op,
+ this can be a drop in replacement from ATen to GroupNorm
+ """
+ count = 0
+ for op in predict_net.op:
+ if op.type == "ATen":
+ op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3
+ if op_name and op_name.decode() == "group_norm":
+ op.arg.remove(get_pb_arg(op, "operator"))
+
+ if get_pb_arg_vali(op, "cudnn_enabled", None):
+ op.arg.remove(get_pb_arg(op, "cudnn_enabled"))
+
+ num_groups = get_pb_arg_vali(op, "num_groups", None)
+ if num_groups is not None:
+ op.arg.remove(get_pb_arg(op, "num_groups"))
+ check_set_pb_arg(op, "group", "i", num_groups)
+
+ op.type = "GroupNorm"
+ count += 1
+ if count > 1:
+ logger.info("Replaced {} ATen operator to GroupNormOp".format(count))
+
+
+# ==== torch/utils_toffee/alias.py =============================================
+
+
+def alias(x, name, is_backward=False):
+ if not torch.onnx.is_in_onnx_export():
+ return x
+ assert isinstance(x, torch.Tensor)
+ return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward)
+
+
+def fuse_alias_placeholder(predict_net, init_net):
+ """Remove AliasWithName placeholder and rename the input/output of it"""
+ # First we finish all the re-naming
+ for i, op in enumerate(predict_net.op):
+ if op.type == "AliasWithName":
+ assert len(op.input) == 1
+ assert len(op.output) == 1
+ name = get_pb_arg_vals(op, "name", None).decode()
+ is_backward = bool(get_pb_arg_vali(op, "is_backward", 0))
+ rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward)
+ rename_op_output(predict_net, i, 0, name)
+
+ # Remove AliasWithName, should be very safe since it's a non-op
+ new_ops = []
+ for op in predict_net.op:
+ if op.type != "AliasWithName":
+ new_ops.append(op)
+ else:
+ # safety check
+ assert op.input == op.output
+ assert op.input[0] == op.arg[0].s.decode()
+ del predict_net.op[:]
+ predict_net.op.extend(new_ops)
+
+
+# ==== torch/utils_caffe2/graph_transform.py ===================================
+
+
+class IllegalGraphTransformError(ValueError):
+ """When a graph transform function call can't be executed."""
+
+
+def _rename_versioned_blob_in_proto(
+ proto: caffe2_pb2.NetDef,
+ old_name: str,
+ new_name: str,
+ version: int,
+ ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]],
+ start_versions: Dict[str, int],
+ end_versions: Dict[str, int],
+):
+ """In given proto, rename all blobs with matched version"""
+ # Operater list
+ for op, i_th_ssa in zip(proto.op, ssa):
+ versioned_inputs, versioned_outputs = i_th_ssa
+ for i in range(len(op.input)):
+ if versioned_inputs[i] == (old_name, version):
+ op.input[i] = new_name
+ for i in range(len(op.output)):
+ if versioned_outputs[i] == (old_name, version):
+ op.output[i] = new_name
+ # external_input
+ if start_versions.get(old_name, 0) == version:
+ for i in range(len(proto.external_input)):
+ if proto.external_input[i] == old_name:
+ proto.external_input[i] = new_name
+ # external_output
+ if end_versions.get(old_name, 0) == version:
+ for i in range(len(proto.external_output)):
+ if proto.external_output[i] == old_name:
+ proto.external_output[i] = new_name
+
+
+def rename_op_input(
+ predict_net: caffe2_pb2.NetDef,
+ init_net: caffe2_pb2.NetDef,
+ op_id: int,
+ input_id: int,
+ new_name: str,
+ from_producer: bool = False,
+):
+ """
+ Rename the op_id-th operator in predict_net, change it's input_id-th input's
+ name to the new_name. It also does automatic re-route and change
+ external_input and init_net if necessary.
+ - It requires the input is only consumed by this op.
+ - This function modifies predict_net and init_net in-place.
+ - When from_producer is enable, this also updates other operators that consumes
+ the same input. Be cautious because may trigger unintended behavior.
+ """
+ assert isinstance(predict_net, caffe2_pb2.NetDef)
+ assert isinstance(init_net, caffe2_pb2.NetDef)
+
+ init_net_ssa, init_net_versions = core.get_ssa(init_net)
+ predict_net_ssa, predict_net_versions = core.get_ssa(
+ predict_net, copy.deepcopy(init_net_versions)
+ )
+
+ versioned_inputs, versioned_outputs = predict_net_ssa[op_id]
+ old_name, version = versioned_inputs[input_id]
+
+ if from_producer:
+ producer_map = get_producer_map(predict_net_ssa)
+ if not (old_name, version) in producer_map:
+ raise NotImplementedError(
+ "Can't find producer, the input {} is probably from"
+ " init_net, this is not supported yet.".format(old_name)
+ )
+ producer = producer_map[(old_name, version)]
+ rename_op_output(predict_net, producer[0], producer[1], new_name)
+ return
+
+ def contain_targets(op_ssa):
+ return (old_name, version) in op_ssa[0]
+
+ is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa]
+ if sum(is_consumer) > 1:
+ raise IllegalGraphTransformError(
+ (
+ "Input '{}' of operator(#{}) are consumed by other ops, please use"
+ + " rename_op_output on the producer instead. Offending op: \n{}"
+ ).format(old_name, op_id, predict_net.op[op_id])
+ )
+
+ # update init_net
+ _rename_versioned_blob_in_proto(
+ init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions
+ )
+ # update predict_net
+ _rename_versioned_blob_in_proto(
+ predict_net,
+ old_name,
+ new_name,
+ version,
+ predict_net_ssa,
+ init_net_versions,
+ predict_net_versions,
+ )
+
+
+def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str):
+ """
+ Rename the op_id-th operator in predict_net, change it's output_id-th input's
+ name to the new_name. It also does automatic re-route and change
+ external_output and if necessary.
+ - It allows multiple consumers of its output.
+ - This function modifies predict_net in-place, doesn't need init_net.
+ """
+ assert isinstance(predict_net, caffe2_pb2.NetDef)
+
+ ssa, blob_versions = core.get_ssa(predict_net)
+
+ versioned_inputs, versioned_outputs = ssa[op_id]
+ old_name, version = versioned_outputs[output_id]
+
+ # update predict_net
+ _rename_versioned_blob_in_proto(
+ predict_net, old_name, new_name, version, ssa, {}, blob_versions
+ )
+
+
+def get_sub_graph_external_input_output(
+ predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int]
+) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]:
+ """
+ Return the list of external input/output of sub-graph,
+ each element is tuple of the name and corresponding version in predict_net.
+
+ external input/output is defined the same way as caffe2 NetDef.
+ """
+ ssa, versions = core.get_ssa(predict_net)
+
+ all_inputs = []
+ all_outputs = []
+ for op_id in sub_graph_op_indices:
+ all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs]
+ all_outputs += list(ssa[op_id][1]) # ssa output won't repeat
+
+ # for versioned blobs, external inputs are just those blob in all_inputs
+ # but not in all_outputs
+ ext_inputs = [inp for inp in all_inputs if inp not in all_outputs]
+
+ # external outputs are essentially outputs of this subgraph that are used
+ # outside of this sub-graph (including predict_net.external_output)
+ all_other_inputs = sum(
+ (ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices),
+ [(outp, versions[outp]) for outp in predict_net.external_output],
+ )
+ ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)]
+
+ return ext_inputs, ext_outputs
+
+
+class DiGraph:
+ """A DAG representation of caffe2 graph, each vertice is a versioned blob."""
+
+ def __init__(self):
+ self.vertices = set()
+ self.graph = collections.defaultdict(list)
+
+ def add_edge(self, u, v):
+ self.graph[u].append(v)
+ self.vertices.add(u)
+ self.vertices.add(v)
+
+ # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/
+ def get_all_paths(self, s, d):
+ visited = {k: False for k in self.vertices}
+ path = []
+ all_paths = []
+
+ def _get_all_paths_util(graph, u, d, visited, path):
+ visited[u] = True
+ path.append(u)
+ if u == d:
+ all_paths.append(copy.deepcopy(path))
+ else:
+ for i in graph[u]:
+ if not visited[i]:
+ _get_all_paths_util(graph, i, d, visited, path)
+ path.pop()
+ visited[u] = False
+
+ _get_all_paths_util(self.graph, s, d, visited, path)
+ return all_paths
+
+ @staticmethod
+ def from_ssa(ssa):
+ graph = DiGraph()
+ for op_id in range(len(ssa)):
+ for inp in ssa[op_id][0]:
+ for outp in ssa[op_id][1]:
+ graph.add_edge(inp, outp)
+ return graph
+
+
+def _get_dependency_chain(ssa, versioned_target, versioned_source):
+ """
+ Return the index list of relevant operator to produce target blob from source blob,
+ if there's no dependency, return empty list.
+ """
+
+ # finding all paths between nodes can be O(N!), thus we can only search
+ # in the subgraph using the op starting from the first consumer of source blob
+ # to the producer of the target blob.
+ consumer_map = get_consumer_map(ssa)
+ producer_map = get_producer_map(ssa)
+ start_op = min(x[0] for x in consumer_map[versioned_source]) - 15
+ end_op = (
+ producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op
+ )
+ sub_graph_ssa = ssa[start_op : end_op + 1]
+ if len(sub_graph_ssa) > 30:
+ logger.warning(
+ "Subgraph bebetween {} and {} is large (from op#{} to op#{}), it"
+ " might take non-trival time to find all paths between them.".format(
+ versioned_source, versioned_target, start_op, end_op
+ )
+ )
+
+ dag = DiGraph.from_ssa(sub_graph_ssa)
+ paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends
+ ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths]
+ return sorted(set().union(*[set(ops) for ops in ops_in_paths]))
+
+
+def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]:
+ """
+ Idenfity the reshape sub-graph in a protobuf.
+ The reshape sub-graph is defined as matching the following pattern:
+
+ (input_blob) -> Op_1 -> ... -> Op_N -> (new_shape) -─┐
+ └-------------------------------------------> Reshape -> (output_blob)
+
+ Return:
+ List of sub-graphs, each sub-graph is represented as a list of indices
+ of the relavent ops, [Op_1, Op_2, ..., Op_N, Reshape]
+ """
+
+ ssa, _ = core.get_ssa(predict_net)
+
+ ret = []
+ for i, op in enumerate(predict_net.op):
+ if op.type == "Reshape":
+ assert len(op.input) == 2
+ input_ssa = ssa[i][0]
+ data_source = input_ssa[0]
+ shape_source = input_ssa[1]
+ op_indices = _get_dependency_chain(ssa, shape_source, data_source)
+ ret.append(op_indices + [i])
+ return ret
+
+
+def remove_reshape_for_fc(predict_net, params):
+ """
+ In PyTorch nn.Linear has to take 2D tensor, this often leads to reshape
+ a 4D tensor to 2D by calling .view(). However this (dynamic) reshaping
+ doesn't work well with ONNX and Int8 tools, and cause using extra
+ ops (eg. ExpandDims) that might not be available on mobile.
+ Luckily Caffe2 supports 4D tensor for FC, so we can remove those reshape
+ after exporting ONNX model.
+ """
+ from caffe2.python import core
+
+ # find all reshape sub-graph that can be removed, which is now all Reshape
+ # sub-graph whose output is only consumed by FC.
+ # TODO: to make it safer, we may need the actually value to better determine
+ # if a Reshape before FC is removable.
+ reshape_sub_graphs = identify_reshape_sub_graph(predict_net)
+ sub_graphs_to_remove = []
+ for reshape_sub_graph in reshape_sub_graphs:
+ reshape_op_id = reshape_sub_graph[-1]
+ assert predict_net.op[reshape_op_id].type == "Reshape"
+ ssa, _ = core.get_ssa(predict_net)
+ reshape_output = ssa[reshape_op_id][1][0]
+ consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]]
+ if all(predict_net.op[consumer].type == "FC" for consumer in consumers):
+ # safety check if the sub-graph is isolated, for this reshape sub-graph,
+ # it means it has one non-param external input and one external output.
+ ext_inputs, ext_outputs = get_sub_graph_external_input_output(
+ predict_net, reshape_sub_graph
+ )
+ non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
+ if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1:
+ sub_graphs_to_remove.append(reshape_sub_graph)
+
+ # perform removing subgraph by:
+ # 1: rename the Reshape's output to its input, then the graph can be
+ # seen as in-place itentify, meaning whose external input/output are the same.
+ # 2: simply remove those ops.
+ remove_op_ids = []
+ params_to_remove = []
+ for sub_graph in sub_graphs_to_remove:
+ logger.info(
+ "Remove Reshape sub-graph:\n{}".format(
+ "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph])
+ )
+ )
+ reshape_op_id = sub_graph[-1]
+ new_reshap_output = predict_net.op[reshape_op_id].input[0]
+ rename_op_output(predict_net, reshape_op_id, 0, new_reshap_output)
+ ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph)
+ non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
+ params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0]
+ assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1
+ assert ext_outputs[0][0] == non_params_ext_inputs[0][0]
+ assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1
+ remove_op_ids.extend(sub_graph)
+ params_to_remove.extend(params_ext_inputs)
+
+ predict_net = copy.deepcopy(predict_net)
+ new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids]
+ del predict_net.op[:]
+ predict_net.op.extend(new_ops)
+ for versioned_params in params_to_remove:
+ name = versioned_params[0]
+ logger.info("Remove params: {} from init_net and predict_net.external_input".format(name))
+ del params[name]
+ predict_net.external_input.remove(name)
+
+ return predict_net, params
+
+
+def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef):
+ """
+ In-place fuse extra copy ops between cpu/gpu for the following case:
+ a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1
+ -CopyBToA> c2 -NextOp2-> d2
+ The fused network will look like:
+ a -NextOp1-> d1
+ -NextOp2-> d2
+ """
+
+ _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"]
+
+ def _fuse_once(predict_net):
+ ssa, blob_versions = core.get_ssa(predict_net)
+ consumer_map = get_consumer_map(ssa)
+ versioned_external_output = [
+ (name, blob_versions[name]) for name in predict_net.external_output
+ ]
+
+ for op_id, op in enumerate(predict_net.op):
+ if op.type in _COPY_OPS:
+ fw_copy_versioned_output = ssa[op_id][1][0]
+ consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]]
+ reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)]
+
+ is_fusable = (
+ len(consumer_ids) > 0
+ and fw_copy_versioned_output not in versioned_external_output
+ and all(
+ predict_net.op[_op_id].type == reverse_op_type
+ and ssa[_op_id][1][0] not in versioned_external_output
+ for _op_id in consumer_ids
+ )
+ )
+
+ if is_fusable:
+ for rv_copy_op_id in consumer_ids:
+ # making each NextOp uses "a" directly and removing Copy ops
+ rs_copy_versioned_output = ssa[rv_copy_op_id][1][0]
+ next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0]
+ predict_net.op[next_op_id].input[inp_id] = op.input[0]
+ # remove CopyOps
+ new_ops = [
+ op
+ for i, op in enumerate(predict_net.op)
+ if i != op_id and i not in consumer_ids
+ ]
+ del predict_net.op[:]
+ predict_net.op.extend(new_ops)
+ return True
+
+ return False
+
+ # _fuse_once returns False is nothing can be fused
+ while _fuse_once(predict_net):
+ pass
+
+
+def remove_dead_end_ops(net_def: caffe2_pb2.NetDef):
+ """remove ops if its output is not used or not in external_output"""
+ ssa, versions = core.get_ssa(net_def)
+ versioned_external_output = [(name, versions[name]) for name in net_def.external_output]
+ consumer_map = get_consumer_map(ssa)
+ removed_op_ids = set()
+
+ def _is_dead_end(versioned_blob):
+ return not (
+ versioned_blob in versioned_external_output
+ or (
+ len(consumer_map[versioned_blob]) > 0
+ and all(x[0] not in removed_op_ids for x in consumer_map[versioned_blob])
+ )
+ )
+
+ for i, ssa_i in reversed(list(enumerate(ssa))):
+ versioned_outputs = ssa_i[1]
+ if all(_is_dead_end(outp) for outp in versioned_outputs):
+ removed_op_ids.add(i)
+
+ # simply removing those deadend ops should have no effect to external_output
+ new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids]
+ del net_def.op[:]
+ net_def.op.extend(new_ops)
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript.py
new file mode 100644
index 0000000000000000000000000000000000000000..24fe59bda44225324928542df3f2ef1745375dfd
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript.py
@@ -0,0 +1,132 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import os
+import torch
+
+from detectron2.utils.file_io import PathManager
+
+from .torchscript_patch import freeze_training_mode, patch_instances
+
+__all__ = ["scripting_with_instances", "dump_torchscript_IR"]
+
+
+def scripting_with_instances(model, fields):
+ """
+ Run :func:`torch.jit.script` on a model that uses the :class:`Instances` class. Since
+ attributes of :class:`Instances` are "dynamically" added in eager mode,it is difficult
+ for scripting to support it out of the box. This function is made to support scripting
+ a model that uses :class:`Instances`. It does the following:
+
+ 1. Create a scriptable ``new_Instances`` class which behaves similarly to ``Instances``,
+ but with all attributes been "static".
+ The attributes need to be statically declared in the ``fields`` argument.
+ 2. Register ``new_Instances``, and force scripting compiler to
+ use it when trying to compile ``Instances``.
+
+ After this function, the process will be reverted. User should be able to script another model
+ using different fields.
+
+ Example:
+ Assume that ``Instances`` in the model consist of two attributes named
+ ``proposal_boxes`` and ``objectness_logits`` with type :class:`Boxes` and
+ :class:`Tensor` respectively during inference. You can call this function like:
+ ::
+ fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
+ torchscipt_model = scripting_with_instances(model, fields)
+
+ Note:
+ It only support models in evaluation mode.
+
+ Args:
+ model (nn.Module): The input model to be exported by scripting.
+ fields (Dict[str, type]): Attribute names and corresponding type that
+ ``Instances`` will use in the model. Note that all attributes used in ``Instances``
+ need to be added, regardless of whether they are inputs/outputs of the model.
+ Data type not defined in detectron2 is not supported for now.
+
+ Returns:
+ torch.jit.ScriptModule: the model in torchscript format
+ """
+ assert (
+ not model.training
+ ), "Currently we only support exporting models in evaluation mode to torchscript"
+
+ with freeze_training_mode(model), patch_instances(fields):
+ scripted_model = torch.jit.script(model)
+ return scripted_model
+
+
+# alias for old name
+export_torchscript_with_instances = scripting_with_instances
+
+
+def dump_torchscript_IR(model, dir):
+ """
+ Dump IR of a TracedModule/ScriptModule/Function in various format (code, graph,
+ inlined graph). Useful for debugging.
+
+ Args:
+ model (TracedModule/ScriptModule/ScriptFUnction): traced or scripted module
+ dir (str): output directory to dump files.
+ """
+ dir = os.path.expanduser(dir)
+ PathManager.mkdirs(dir)
+
+ def _get_script_mod(mod):
+ if isinstance(mod, torch.jit.TracedModule):
+ return mod._actual_script_module
+ return mod
+
+ # Dump pretty-printed code: https://pytorch.org/docs/stable/jit.html#inspecting-code
+ with PathManager.open(os.path.join(dir, "model_ts_code.txt"), "w") as f:
+
+ def get_code(mod):
+ # Try a few ways to get code using private attributes.
+ try:
+ # This contains more information than just `mod.code`
+ return _get_script_mod(mod)._c.code
+ except AttributeError:
+ pass
+ try:
+ return mod.code
+ except AttributeError:
+ return None
+
+ def dump_code(prefix, mod):
+ code = get_code(mod)
+ name = prefix or "root model"
+ if code is None:
+ f.write(f"Could not found code for {name} (type={mod.original_name})\n")
+ f.write("\n")
+ else:
+ f.write(f"\nCode for {name}, type={mod.original_name}:\n")
+ f.write(code)
+ f.write("\n")
+ f.write("-" * 80)
+
+ for name, m in mod.named_children():
+ dump_code(prefix + "." + name, m)
+
+ if isinstance(model, torch.jit.ScriptFunction):
+ f.write(get_code(model))
+ else:
+ dump_code("", model)
+
+ def _get_graph(model):
+ try:
+ # Recursively dump IR of all modules
+ return _get_script_mod(model)._c.dump_to_str(True, False, False)
+ except AttributeError:
+ return model.graph.str()
+
+ with PathManager.open(os.path.join(dir, "model_ts_IR.txt"), "w") as f:
+ f.write(_get_graph(model))
+
+ # Dump IR of the entire graph (all submodules inlined)
+ with PathManager.open(os.path.join(dir, "model_ts_IR_inlined.txt"), "w") as f:
+ f.write(str(model.inlined_graph))
+
+ if not isinstance(model, torch.jit.ScriptFunction):
+ # Dump the model structure in pytorch style
+ with PathManager.open(os.path.join(dir, "model.txt"), "w") as f:
+ f.write(str(model))
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..da9b324f1582e31d1a16d2fe462ac2989bea56ea
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py
@@ -0,0 +1,406 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import os
+import sys
+import tempfile
+from contextlib import ExitStack, contextmanager
+from copy import deepcopy
+from unittest import mock
+import torch
+from torch import nn
+
+# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964
+import detectron2 # noqa F401
+from detectron2.structures import Boxes, Instances
+from detectron2.utils.env import _import_file
+
+_counter = 0
+
+
+def _clear_jit_cache():
+ from torch.jit._recursive import concrete_type_store
+ from torch.jit._state import _jit_caching_layer
+
+ concrete_type_store.type_store.clear() # for modules
+ _jit_caching_layer.clear() # for free functions
+
+
+def _add_instances_conversion_methods(newInstances):
+ """
+ Add from_instances methods to the scripted Instances class.
+ """
+ cls_name = newInstances.__name__
+
+ @torch.jit.unused
+ def from_instances(instances: Instances):
+ """
+ Create scripted Instances from original Instances
+ """
+ fields = instances.get_fields()
+ image_size = instances.image_size
+ ret = newInstances(image_size)
+ for name, val in fields.items():
+ assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}"
+ setattr(ret, name, deepcopy(val))
+ return ret
+
+ newInstances.from_instances = from_instances
+
+
+@contextmanager
+def patch_instances(fields):
+ """
+ A contextmanager, under which the Instances class in detectron2 is replaced
+ by a statically-typed scriptable class, defined by `fields`.
+ See more in `scripting_with_instances`.
+ """
+
+ with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile(
+ mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False
+ ) as f:
+ try:
+ # Objects that use Instances should not reuse previously-compiled
+ # results in cache, because `Instances` could be a new class each time.
+ _clear_jit_cache()
+
+ cls_name, s = _gen_instance_module(fields)
+ f.write(s)
+ f.flush()
+ f.close()
+
+ module = _import(f.name)
+ new_instances = getattr(module, cls_name)
+ _ = torch.jit.script(new_instances)
+ # let torchscript think Instances was scripted already
+ Instances.__torch_script_class__ = True
+ # let torchscript find new_instances when looking for the jit type of Instances
+ Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances)
+
+ _add_instances_conversion_methods(new_instances)
+ yield new_instances
+ finally:
+ try:
+ del Instances.__torch_script_class__
+ del Instances._jit_override_qualname
+ except AttributeError:
+ pass
+ sys.modules.pop(module.__name__)
+
+
+def _gen_instance_class(fields):
+ """
+ Args:
+ fields (dict[name: type])
+ """
+
+ class _FieldType:
+ def __init__(self, name, type_):
+ assert isinstance(name, str), f"Field name must be str, got {name}"
+ self.name = name
+ self.type_ = type_
+ self.annotation = f"{type_.__module__}.{type_.__name__}"
+
+ fields = [_FieldType(k, v) for k, v in fields.items()]
+
+ def indent(level, s):
+ return " " * 4 * level + s
+
+ lines = []
+
+ global _counter
+ _counter += 1
+
+ cls_name = "ScriptedInstances{}".format(_counter)
+
+ field_names = tuple(x.name for x in fields)
+ extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields])
+ lines.append(
+ f"""
+class {cls_name}:
+ def __init__(self, image_size: Tuple[int, int], {extra_args}):
+ self.image_size = image_size
+ self._field_names = {field_names}
+"""
+ )
+
+ for f in fields:
+ lines.append(
+ indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})")
+ )
+
+ for f in fields:
+ lines.append(
+ f"""
+ @property
+ def {f.name}(self) -> {f.annotation}:
+ # has to use a local for type refinement
+ # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement
+ t = self._{f.name}
+ assert t is not None, "{f.name} is None and cannot be accessed!"
+ return t
+
+ @{f.name}.setter
+ def {f.name}(self, value: {f.annotation}) -> None:
+ self._{f.name} = value
+"""
+ )
+
+ # support method `__len__`
+ lines.append(
+ """
+ def __len__(self) -> int:
+"""
+ )
+ for f in fields:
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ return len(t)
+"""
+ )
+ lines.append(
+ """
+ raise NotImplementedError("Empty Instances does not support __len__!")
+"""
+ )
+
+ # support method `has`
+ lines.append(
+ """
+ def has(self, name: str) -> bool:
+"""
+ )
+ for f in fields:
+ lines.append(
+ f"""
+ if name == "{f.name}":
+ return self._{f.name} is not None
+"""
+ )
+ lines.append(
+ """
+ return False
+"""
+ )
+
+ # support method `to`
+ none_args = ", None" * len(fields)
+ lines.append(
+ f"""
+ def to(self, device: torch.device) -> "{cls_name}":
+ ret = {cls_name}(self.image_size{none_args})
+"""
+ )
+ for f in fields:
+ if hasattr(f.type_, "to"):
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ ret._{f.name} = t.to(device)
+"""
+ )
+ else:
+ # For now, ignore fields that cannot be moved to devices.
+ # Maybe can support other tensor-like classes (e.g. __torch_function__)
+ pass
+ lines.append(
+ """
+ return ret
+"""
+ )
+
+ # support method `getitem`
+ none_args = ", None" * len(fields)
+ lines.append(
+ f"""
+ def __getitem__(self, item) -> "{cls_name}":
+ ret = {cls_name}(self.image_size{none_args})
+"""
+ )
+ for f in fields:
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ ret._{f.name} = t[item]
+"""
+ )
+ lines.append(
+ """
+ return ret
+"""
+ )
+
+ # support method `cat`
+ # this version does not contain checks that all instances have same size and fields
+ none_args = ", None" * len(fields)
+ lines.append(
+ f"""
+ def cat(self, instances: List["{cls_name}"]) -> "{cls_name}":
+ ret = {cls_name}(self.image_size{none_args})
+"""
+ )
+ for f in fields:
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ values: List[{f.annotation}] = [x.{f.name} for x in instances]
+ if torch.jit.isinstance(t, torch.Tensor):
+ ret._{f.name} = torch.cat(values, dim=0)
+ else:
+ ret._{f.name} = t.cat(values)
+"""
+ )
+ lines.append(
+ """
+ return ret"""
+ )
+
+ # support method `get_fields()`
+ lines.append(
+ """
+ def get_fields(self) -> Dict[str, Tensor]:
+ ret = {}
+ """
+ )
+ for f in fields:
+ if f.type_ == Boxes:
+ stmt = "t.tensor"
+ elif f.type_ == torch.Tensor:
+ stmt = "t"
+ else:
+ stmt = f'assert False, "unsupported type {str(f.type_)}"'
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ ret["{f.name}"] = {stmt}
+ """
+ )
+ lines.append(
+ """
+ return ret"""
+ )
+ return cls_name, os.linesep.join(lines)
+
+
+def _gen_instance_module(fields):
+ # TODO: find a more automatic way to enable import of other classes
+ s = """
+from copy import deepcopy
+import torch
+from torch import Tensor
+import typing
+from typing import *
+
+import detectron2
+from detectron2.structures import Boxes, Instances
+
+"""
+
+ cls_name, cls_def = _gen_instance_class(fields)
+ s += cls_def
+ return cls_name, s
+
+
+def _import(path):
+ return _import_file(
+ "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True
+ )
+
+
+@contextmanager
+def patch_builtin_len(modules=()):
+ """
+ Patch the builtin len() function of a few detectron2 modules
+ to use __len__ instead, because __len__ does not convert values to
+ integers and therefore is friendly to tracing.
+
+ Args:
+ modules (list[stsr]): names of extra modules to patch len(), in
+ addition to those in detectron2.
+ """
+
+ def _new_len(obj):
+ return obj.__len__()
+
+ with ExitStack() as stack:
+ MODULES = [
+ "detectron2.modeling.roi_heads.fast_rcnn",
+ "detectron2.modeling.roi_heads.mask_head",
+ "detectron2.modeling.roi_heads.keypoint_head",
+ ] + list(modules)
+ ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES]
+ for m in ctxs:
+ m.side_effect = _new_len
+ yield
+
+
+def patch_nonscriptable_classes():
+ """
+ Apply patches on a few nonscriptable detectron2 classes.
+ Should not have side-effects on eager usage.
+ """
+ # __prepare_scriptable__ can also be added to models for easier maintenance.
+ # But it complicates the clean model code.
+
+ from detectron2.modeling.backbone import ResNet, FPN
+
+ # Due to https://github.com/pytorch/pytorch/issues/36061,
+ # we change backbone to use ModuleList for scripting.
+ # (note: this changes param names in state_dict)
+
+ def prepare_resnet(self):
+ ret = deepcopy(self)
+ ret.stages = nn.ModuleList(ret.stages)
+ for k in self.stage_names:
+ delattr(ret, k)
+ return ret
+
+ ResNet.__prepare_scriptable__ = prepare_resnet
+
+ def prepare_fpn(self):
+ ret = deepcopy(self)
+ ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
+ ret.output_convs = nn.ModuleList(ret.output_convs)
+ for name, _ in self.named_children():
+ if name.startswith("fpn_"):
+ delattr(ret, name)
+ return ret
+
+ FPN.__prepare_scriptable__ = prepare_fpn
+
+ # Annotate some attributes to be constants for the purpose of scripting,
+ # even though they are not constants in eager mode.
+ from detectron2.modeling.roi_heads import StandardROIHeads
+
+ if hasattr(StandardROIHeads, "__annotations__"):
+ # copy first to avoid editing annotations of base class
+ StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
+ StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool]
+ StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool]
+
+
+# These patches are not supposed to have side-effects.
+patch_nonscriptable_classes()
+
+
+@contextmanager
+def freeze_training_mode(model):
+ """
+ A context manager that annotates the "training" attribute of every submodule
+ to constant, so that the training codepath in these modules can be
+ meta-compiled away. Upon exiting, the annotations are reverted.
+ """
+ classes = {type(x) for x in model.modules()}
+ # __constants__ is the old way to annotate constants and not compatible
+ # with __annotations__ .
+ classes = {x for x in classes if not hasattr(x, "__constants__")}
+ for cls in classes:
+ cls.__annotations__["training"] = torch.jit.Final[bool]
+ yield
+ for cls in classes:
+ cls.__annotations__["training"] = bool
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/__init__.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d015c530b3e33de8ea60943a0a98b135f013dd7
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm, CycleBatchNormList
+from .deform_conv import DeformConv, ModulatedDeformConv
+from .mask_ops import paste_masks_in_image
+from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated
+from .roi_align import ROIAlign, roi_align
+from .roi_align_rotated import ROIAlignRotated, roi_align_rotated
+from .shape_spec import ShapeSpec
+from .wrappers import (
+ BatchNorm2d,
+ Conv2d,
+ ConvTranspose2d,
+ cat,
+ interpolate,
+ Linear,
+ nonzero_tuple,
+ cross_entropy,
+ shapes_to_tensor,
+)
+from .blocks import CNNBlockBase, DepthwiseSeparableConv2d
+from .aspp import ASPP
+from .losses import ciou_loss, diou_loss
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/aspp.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/aspp.py
new file mode 100644
index 0000000000000000000000000000000000000000..14861aa9ede4fea6a69a49f189bcab997b558148
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/aspp.py
@@ -0,0 +1,144 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from copy import deepcopy
+import fvcore.nn.weight_init as weight_init
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from .batch_norm import get_norm
+from .blocks import DepthwiseSeparableConv2d
+from .wrappers import Conv2d
+
+
+class ASPP(nn.Module):
+ """
+ Atrous Spatial Pyramid Pooling (ASPP).
+ """
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ dilations,
+ *,
+ norm,
+ activation,
+ pool_kernel_size=None,
+ dropout: float = 0.0,
+ use_depthwise_separable_conv=False,
+ ):
+ """
+ Args:
+ in_channels (int): number of input channels for ASPP.
+ out_channels (int): number of output channels.
+ dilations (list): a list of 3 dilations in ASPP.
+ norm (str or callable): normalization for all conv layers.
+ See :func:`layers.get_norm` for supported format. norm is
+ applied to all conv layers except the conv following
+ global average pooling.
+ activation (callable): activation function.
+ pool_kernel_size (tuple, list): the average pooling size (kh, kw)
+ for image pooling layer in ASPP. If set to None, it always
+ performs global average pooling. If not None, it must be
+ divisible by the shape of inputs in forward(). It is recommended
+ to use a fixed input feature size in training, and set this
+ option to match this size, so that it performs global average
+ pooling in training, and the size of the pooling window stays
+ consistent in inference.
+ dropout (float): apply dropout on the output of ASPP. It is used in
+ the official DeepLab implementation with a rate of 0.1:
+ https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa
+ use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
+ for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`.
+ """
+ super(ASPP, self).__init__()
+ assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations))
+ self.pool_kernel_size = pool_kernel_size
+ self.dropout = dropout
+ use_bias = norm == ""
+ self.convs = nn.ModuleList()
+ # conv 1x1
+ self.convs.append(
+ Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ bias=use_bias,
+ norm=get_norm(norm, out_channels),
+ activation=deepcopy(activation),
+ )
+ )
+ weight_init.c2_xavier_fill(self.convs[-1])
+ # atrous convs
+ for dilation in dilations:
+ if use_depthwise_separable_conv:
+ self.convs.append(
+ DepthwiseSeparableConv2d(
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ padding=dilation,
+ dilation=dilation,
+ norm1=norm,
+ activation1=deepcopy(activation),
+ norm2=norm,
+ activation2=deepcopy(activation),
+ )
+ )
+ else:
+ self.convs.append(
+ Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ padding=dilation,
+ dilation=dilation,
+ bias=use_bias,
+ norm=get_norm(norm, out_channels),
+ activation=deepcopy(activation),
+ )
+ )
+ weight_init.c2_xavier_fill(self.convs[-1])
+ # image pooling
+ # We do not add BatchNorm because the spatial resolution is 1x1,
+ # the original TF implementation has BatchNorm.
+ if pool_kernel_size is None:
+ image_pooling = nn.Sequential(
+ nn.AdaptiveAvgPool2d(1),
+ Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
+ )
+ else:
+ image_pooling = nn.Sequential(
+ nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1),
+ Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
+ )
+ weight_init.c2_xavier_fill(image_pooling[1])
+ self.convs.append(image_pooling)
+
+ self.project = Conv2d(
+ 5 * out_channels,
+ out_channels,
+ kernel_size=1,
+ bias=use_bias,
+ norm=get_norm(norm, out_channels),
+ activation=deepcopy(activation),
+ )
+ weight_init.c2_xavier_fill(self.project)
+
+ def forward(self, x):
+ size = x.shape[-2:]
+ if self.pool_kernel_size is not None:
+ if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]:
+ raise ValueError(
+ "`pool_kernel_size` must be divisible by the shape of inputs. "
+ "Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size)
+ )
+ res = []
+ for conv in self.convs:
+ res.append(conv(x))
+ res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False)
+ res = torch.cat(res, dim=1)
+ res = self.project(res)
+ res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res
+ return res
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..09a6c66cf6f4b21c38a7829b029f0ab5deda1f9e
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py
@@ -0,0 +1,276 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import torch
+import torch.distributed as dist
+from fvcore.nn.distributed import differentiable_all_reduce
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.utils import comm, env
+
+from .wrappers import BatchNorm2d
+
+
+class FrozenBatchNorm2d(nn.Module):
+ """
+ BatchNorm2d where the batch statistics and the affine parameters are fixed.
+
+ It contains non-trainable buffers called
+ "weight" and "bias", "running_mean", "running_var",
+ initialized to perform identity transformation.
+
+ The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
+ which are computed from the original four parameters of BN.
+ The affine transform `x * weight + bias` will perform the equivalent
+ computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
+ When loading a backbone model from Caffe2, "running_mean" and "running_var"
+ will be left unchanged as identity transformation.
+
+ Other pre-trained backbone models may contain all 4 parameters.
+
+ The forward is implemented by `F.batch_norm(..., training=False)`.
+ """
+
+ _version = 3
+
+ def __init__(self, num_features, eps=1e-5):
+ super().__init__()
+ self.num_features = num_features
+ self.eps = eps
+ self.register_buffer("weight", torch.ones(num_features))
+ self.register_buffer("bias", torch.zeros(num_features))
+ self.register_buffer("running_mean", torch.zeros(num_features))
+ self.register_buffer("running_var", torch.ones(num_features) - eps)
+
+ def forward(self, x):
+ if x.requires_grad:
+ # When gradients are needed, F.batch_norm will use extra memory
+ # because its backward op computes gradients for weight/bias as well.
+ scale = self.weight * (self.running_var + self.eps).rsqrt()
+ bias = self.bias - self.running_mean * scale
+ scale = scale.reshape(1, -1, 1, 1)
+ bias = bias.reshape(1, -1, 1, 1)
+ out_dtype = x.dtype # may be half
+ return x * scale.to(out_dtype) + bias.to(out_dtype)
+ else:
+ # When gradients are not needed, F.batch_norm is a single fused op
+ # and provide more optimization opportunities.
+ return F.batch_norm(
+ x,
+ self.running_mean,
+ self.running_var,
+ self.weight,
+ self.bias,
+ training=False,
+ eps=self.eps,
+ )
+
+ def _load_from_state_dict(
+ self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+ ):
+ version = local_metadata.get("version", None)
+
+ if version is None or version < 2:
+ # No running_mean/var in early versions
+ # This will silent the warnings
+ if prefix + "running_mean" not in state_dict:
+ state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
+ if prefix + "running_var" not in state_dict:
+ state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
+
+ super()._load_from_state_dict(
+ state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+ )
+
+ def __repr__(self):
+ return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
+
+ @classmethod
+ def convert_frozen_batchnorm(cls, module):
+ """
+ Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
+
+ Args:
+ module (torch.nn.Module):
+
+ Returns:
+ If module is BatchNorm/SyncBatchNorm, returns a new module.
+ Otherwise, in-place convert module and return it.
+
+ Similar to convert_sync_batchnorm in
+ https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
+ """
+ bn_module = nn.modules.batchnorm
+ bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
+ res = module
+ if isinstance(module, bn_module):
+ res = cls(module.num_features)
+ if module.affine:
+ res.weight.data = module.weight.data.clone().detach()
+ res.bias.data = module.bias.data.clone().detach()
+ res.running_mean.data = module.running_mean.data
+ res.running_var.data = module.running_var.data
+ res.eps = module.eps
+ else:
+ for name, child in module.named_children():
+ new_child = cls.convert_frozen_batchnorm(child)
+ if new_child is not child:
+ res.add_module(name, new_child)
+ return res
+
+
+def get_norm(norm, out_channels):
+ """
+ Args:
+ norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
+ or a callable that takes a channel number and returns
+ the normalization layer as a nn.Module.
+
+ Returns:
+ nn.Module or None: the normalization layer
+ """
+ if norm is None:
+ return None
+ if isinstance(norm, str):
+ if len(norm) == 0:
+ return None
+ norm = {
+ "BN": BatchNorm2d,
+ # Fixed in https://github.com/pytorch/pytorch/pull/36382
+ "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
+ "FrozenBN": FrozenBatchNorm2d,
+ "GN": lambda channels: nn.GroupNorm(32, channels),
+ # for debugging:
+ "nnSyncBN": nn.SyncBatchNorm,
+ "naiveSyncBN": NaiveSyncBatchNorm,
+ # expose stats_mode N as an option to caller, required for zero-len inputs
+ "naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"),
+ }[norm]
+ return norm(out_channels)
+
+
+class NaiveSyncBatchNorm(BatchNorm2d):
+ """
+ In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
+ when the batch size on each worker is different.
+ (e.g., when scale augmentation is used, or when it is applied to mask head).
+
+ This is a slower but correct alternative to `nn.SyncBatchNorm`.
+
+ Note:
+ There isn't a single definition of Sync BatchNorm.
+
+ When ``stats_mode==""``, this module computes overall statistics by using
+ statistics of each worker with equal weight. The result is true statistics
+ of all samples (as if they are all on one worker) only when all workers
+ have the same (N, H, W). This mode does not support inputs with zero batch size.
+
+ When ``stats_mode=="N"``, this module computes overall statistics by weighting
+ the statistics of each worker by their ``N``. The result is true statistics
+ of all samples (as if they are all on one worker) only when all workers
+ have the same (H, W). It is slower than ``stats_mode==""``.
+
+ Even though the result of this module may not be the true statistics of all samples,
+ it may still be reasonable because it might be preferrable to assign equal weights
+ to all workers, regardless of their (H, W) dimension, instead of putting larger weight
+ on larger images. From preliminary experiments, little difference is found between such
+ a simplified implementation and an accurate computation of overall mean & variance.
+ """
+
+ def __init__(self, *args, stats_mode="", **kwargs):
+ super().__init__(*args, **kwargs)
+ assert stats_mode in ["", "N"]
+ self._stats_mode = stats_mode
+
+ def forward(self, input):
+ if comm.get_world_size() == 1 or not self.training:
+ return super().forward(input)
+
+ B, C = input.shape[0], input.shape[1]
+
+ half_input = input.dtype == torch.float16
+ if half_input:
+ # fp16 does not have good enough numerics for the reduction here
+ input = input.float()
+ mean = torch.mean(input, dim=[0, 2, 3])
+ meansqr = torch.mean(input * input, dim=[0, 2, 3])
+
+ if self._stats_mode == "":
+ assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
+ vec = torch.cat([mean, meansqr], dim=0)
+ vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size())
+ mean, meansqr = torch.split(vec, C)
+ momentum = self.momentum
+ else:
+ if B == 0:
+ vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
+ vec = vec + input.sum() # make sure there is gradient w.r.t input
+ else:
+ vec = torch.cat(
+ [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
+ )
+ vec = differentiable_all_reduce(vec * B)
+
+ total_batch = vec[-1].detach()
+ momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0
+ mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero
+
+ var = meansqr - mean * mean
+ invstd = torch.rsqrt(var + self.eps)
+ scale = self.weight * invstd
+ bias = self.bias - mean * scale
+ scale = scale.reshape(1, -1, 1, 1)
+ bias = bias.reshape(1, -1, 1, 1)
+
+ self.running_mean += momentum * (mean.detach() - self.running_mean)
+ self.running_var += momentum * (var.detach() - self.running_var)
+ ret = input * scale + bias
+ if half_input:
+ ret = ret.half()
+ return ret
+
+
+class CycleBatchNormList(nn.ModuleList):
+ """
+ Implement domain-specific BatchNorm by cycling.
+
+ When a BatchNorm layer is used for multiple input domains or input
+ features, it might need to maintain a separate test-time statistics
+ for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`.
+
+ This module implements it by using N separate BN layers
+ and it cycles through them every time a forward() is called.
+
+ NOTE: The caller of this module MUST guarantee to always call
+ this module by multiple of N times. Otherwise its test-time statistics
+ will be incorrect.
+ """
+
+ def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs):
+ """
+ Args:
+ length: number of BatchNorm layers to cycle.
+ bn_class: the BatchNorm class to use
+ kwargs: arguments of the BatchNorm class, such as num_features.
+ """
+ self._affine = kwargs.pop("affine", True)
+ super().__init__([bn_class(**kwargs, affine=False) for k in range(length)])
+ if self._affine:
+ # shared affine, domain-specific BN
+ channels = self[0].num_features
+ self.weight = nn.Parameter(torch.ones(channels))
+ self.bias = nn.Parameter(torch.zeros(channels))
+ self._pos = 0
+
+ def forward(self, x):
+ ret = self[self._pos](x)
+ self._pos = (self._pos + 1) % len(self)
+
+ if self._affine:
+ w = self.weight.reshape(1, -1, 1, 1)
+ b = self.bias.reshape(1, -1, 1, 1)
+ return ret * w + b
+ else:
+ return ret
+
+ def extra_repr(self):
+ return f"affine={self._affine}"
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/blocks.py b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..1995a4bf7339e8deb7eaaffda4f819dda55e7ac7
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/blocks.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import fvcore.nn.weight_init as weight_init
+from torch import nn
+
+from .batch_norm import FrozenBatchNorm2d, get_norm
+from .wrappers import Conv2d
+
+
+"""
+CNN building blocks.
+"""
+
+
+class CNNBlockBase(nn.Module):
+ """
+ A CNN block is assumed to have input channels, output channels and a stride.
+ The input and output of `forward()` method must be NCHW tensors.
+ The method can perform arbitrary computation but must match the given
+ channels and stride specification.
+
+ Attribute:
+ in_channels (int):
+ out_channels (int):
+ stride (int):
+ """
+
+ def __init__(self, in_channels, out_channels, stride):
+ """
+ The `__init__` method of any subclass should also contain these arguments.
+
+ Args:
+ in_channels (int):
+ out_channels (int):
+ stride (int):
+ """
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.stride = stride
+
+ def freeze(self):
+ """
+ Make this block not trainable.
+ This method sets all parameters to `requires_grad=False`,
+ and convert all BatchNorm layers to FrozenBatchNorm
+
+ Returns:
+ the block itself
+ """
+ for p in self.parameters():
+ p.requires_grad = False
+ FrozenBatchNorm2d.convert_frozen_batchnorm(self)
+ return self
+
+
+class DepthwiseSeparableConv2d(nn.Module):
+ """
+ A kxk depthwise convolution + a 1x1 convolution.
+
+ In :paper:`xception`, norm & activation are applied on the second conv.
+ :paper:`mobilenet` uses norm & activation on both convs.
+ """
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ padding=1,
+ dilation=1,
+ *,
+ norm1=None,
+ activation1=None,
+ norm2=None,
+ activation2=None,
+ ):
+ """
+ Args:
+ norm1, norm2 (str or callable): normalization for the two conv layers.
+ activation1, activation2 (callable(Tensor) -> Tensor): activation
+ function for the two conv layers.
+ """
+ super().__init__()
+ self.depthwise = Conv2d(
+ in_channels,
+ in_channels,
+ kernel_size=kernel_size,
+ padding=padding,
+ dilation=dilation,
+ groups=in_channels,
+ bias=not norm1,
+ norm=get_norm(norm1, in_channels),
+ activation=activation1,
+ )
+ self.pointwise = Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ bias=not norm2,
+ norm=get_norm(norm2, out_channels),
+ activation=activation2,
+ )
+
+ # default initialization
+ weight_init.c2_msra_fill(self.depthwise)
+ weight_init.c2_msra_fill(self.pointwise)
+
+ def forward(self, x):
+ return self.pointwise(self.depthwise(x))
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/README.md b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..778ed3da0bae89820831bcd8a72ff7b9cad8d4dd
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/README.md
@@ -0,0 +1,7 @@
+
+
+To add a new Op:
+
+1. Create a new directory
+2. Implement new ops there
+3. Delcare its Python interface in `vision.cpp`.
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h
new file mode 100644
index 0000000000000000000000000000000000000000..03f4211003f42f601f0cfcf4a690f5da4a0a1f67
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h
@@ -0,0 +1,115 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+#include
+
+namespace detectron2 {
+
+at::Tensor ROIAlignRotated_forward_cpu(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio);
+
+at::Tensor ROIAlignRotated_backward_cpu(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int batch_size,
+ const int channels,
+ const int height,
+ const int width,
+ const int sampling_ratio);
+
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+at::Tensor ROIAlignRotated_forward_cuda(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio);
+
+at::Tensor ROIAlignRotated_backward_cuda(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int batch_size,
+ const int channels,
+ const int height,
+ const int width,
+ const int sampling_ratio);
+#endif
+
+// Interface for Python
+inline at::Tensor ROIAlignRotated_forward(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const double spatial_scale,
+ const int64_t pooled_height,
+ const int64_t pooled_width,
+ const int64_t sampling_ratio) {
+ if (input.is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+ return ROIAlignRotated_forward_cuda(
+ input,
+ rois,
+ spatial_scale,
+ pooled_height,
+ pooled_width,
+ sampling_ratio);
+#else
+ AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+ }
+ return ROIAlignRotated_forward_cpu(
+ input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
+}
+
+inline at::Tensor ROIAlignRotated_backward(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const double spatial_scale,
+ const int64_t pooled_height,
+ const int64_t pooled_width,
+ const int64_t batch_size,
+ const int64_t channels,
+ const int64_t height,
+ const int64_t width,
+ const int64_t sampling_ratio) {
+ if (grad.is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+ return ROIAlignRotated_backward_cuda(
+ grad,
+ rois,
+ spatial_scale,
+ pooled_height,
+ pooled_width,
+ batch_size,
+ channels,
+ height,
+ width,
+ sampling_ratio);
+#else
+ AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+ }
+ return ROIAlignRotated_backward_cpu(
+ grad,
+ rois,
+ spatial_scale,
+ pooled_height,
+ pooled_width,
+ batch_size,
+ channels,
+ height,
+ width,
+ sampling_ratio);
+}
+
+} // namespace detectron2
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2a3d3056cc71a4acaafb570739a9dd247a7eb1ed
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp
@@ -0,0 +1,522 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include
+#include "ROIAlignRotated.h"
+
+// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
+// and PyTorch ROIAlign (non-rotated) Op implementations.
+// The key difference between this implementation and those ones is
+// we don't do "legacy offset" in this version, as there aren't many previous
+// works, if any, using the "legacy" ROIAlignRotated Op.
+// This would make the interface a bit cleaner.
+
+namespace detectron2 {
+
+namespace {
+template
+struct PreCalc {
+ int pos1;
+ int pos2;
+ int pos3;
+ int pos4;
+ T w1;
+ T w2;
+ T w3;
+ T w4;
+};
+
+template
+void pre_calc_for_bilinear_interpolate(
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int iy_upper,
+ const int ix_upper,
+ T roi_start_h,
+ T roi_start_w,
+ T bin_size_h,
+ T bin_size_w,
+ int roi_bin_grid_h,
+ int roi_bin_grid_w,
+ T roi_center_h,
+ T roi_center_w,
+ T cos_theta,
+ T sin_theta,
+ std::vector>& pre_calc) {
+ int pre_calc_index = 0;
+ for (int ph = 0; ph < pooled_height; ph++) {
+ for (int pw = 0; pw < pooled_width; pw++) {
+ for (int iy = 0; iy < iy_upper; iy++) {
+ const T yy = roi_start_h + ph * bin_size_h +
+ static_cast(iy + .5f) * bin_size_h /
+ static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5
+ for (int ix = 0; ix < ix_upper; ix++) {
+ const T xx = roi_start_w + pw * bin_size_w +
+ static_cast(ix + .5f) * bin_size_w /
+ static_cast(roi_bin_grid_w);
+
+ // Rotate by theta around the center and translate
+ // In image space, (y, x) is the order for Right Handed System,
+ // and this is essentially multiplying the point by a rotation matrix
+ // to rotate it counterclockwise through angle theta.
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+ T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+ // deal with: inverse elements are out of feature map boundary
+ if (y < -1.0 || y > height || x < -1.0 || x > width) {
+ // empty
+ PreCalc pc;
+ pc.pos1 = 0;
+ pc.pos2 = 0;
+ pc.pos3 = 0;
+ pc.pos4 = 0;
+ pc.w1 = 0;
+ pc.w2 = 0;
+ pc.w3 = 0;
+ pc.w4 = 0;
+ pre_calc[pre_calc_index] = pc;
+ pre_calc_index += 1;
+ continue;
+ }
+
+ if (y < 0) {
+ y = 0;
+ }
+ if (x < 0) {
+ x = 0;
+ }
+
+ int y_low = (int)y;
+ int x_low = (int)x;
+ int y_high;
+ int x_high;
+
+ if (y_low >= height - 1) {
+ y_high = y_low = height - 1;
+ y = (T)y_low;
+ } else {
+ y_high = y_low + 1;
+ }
+
+ if (x_low >= width - 1) {
+ x_high = x_low = width - 1;
+ x = (T)x_low;
+ } else {
+ x_high = x_low + 1;
+ }
+
+ T ly = y - y_low;
+ T lx = x - x_low;
+ T hy = 1. - ly, hx = 1. - lx;
+ T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+ // save weights and indices
+ PreCalc pc;
+ pc.pos1 = y_low * width + x_low;
+ pc.pos2 = y_low * width + x_high;
+ pc.pos3 = y_high * width + x_low;
+ pc.pos4 = y_high * width + x_high;
+ pc.w1 = w1;
+ pc.w2 = w2;
+ pc.w3 = w3;
+ pc.w4 = w4;
+ pre_calc[pre_calc_index] = pc;
+
+ pre_calc_index += 1;
+ }
+ }
+ }
+ }
+}
+
+template
+void bilinear_interpolate_gradient(
+ const int height,
+ const int width,
+ T y,
+ T x,
+ T& w1,
+ T& w2,
+ T& w3,
+ T& w4,
+ int& x_low,
+ int& x_high,
+ int& y_low,
+ int& y_high) {
+ // deal with cases that inverse elements are out of feature map boundary
+ if (y < -1.0 || y > height || x < -1.0 || x > width) {
+ // empty
+ w1 = w2 = w3 = w4 = 0.;
+ x_low = x_high = y_low = y_high = -1;
+ return;
+ }
+
+ if (y < 0) {
+ y = 0;
+ }
+
+ if (x < 0) {
+ x = 0;
+ }
+
+ y_low = (int)y;
+ x_low = (int)x;
+
+ if (y_low >= height - 1) {
+ y_high = y_low = height - 1;
+ y = (T)y_low;
+ } else {
+ y_high = y_low + 1;
+ }
+
+ if (x_low >= width - 1) {
+ x_high = x_low = width - 1;
+ x = (T)x_low;
+ } else {
+ x_high = x_low + 1;
+ }
+
+ T ly = y - y_low;
+ T lx = x - x_low;
+ T hy = 1. - ly, hx = 1. - lx;
+
+ // reference in forward
+ // T v1 = input[y_low * width + x_low];
+ // T v2 = input[y_low * width + x_high];
+ // T v3 = input[y_high * width + x_low];
+ // T v4 = input[y_high * width + x_high];
+ // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+ w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+ return;
+}
+
+template
+inline void add(T* address, const T& val) {
+ *address += val;
+}
+
+} // namespace
+
+template
+void ROIAlignRotatedForward(
+ const int nthreads,
+ const T* input,
+ const T& spatial_scale,
+ const int channels,
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio,
+ const T* rois,
+ T* output) {
+ int n_rois = nthreads / channels / pooled_width / pooled_height;
+ // (n, c, ph, pw) is an element in the pooled output
+ // can be parallelized using omp
+ // #pragma omp parallel for num_threads(32)
+ for (int n = 0; n < n_rois; n++) {
+ int index_n = n * channels * pooled_width * pooled_height;
+
+ const T* current_roi = rois + n * 6;
+ int roi_batch_ind = current_roi[0];
+
+ // Do not use rounding; this implementation detail is critical
+ // ROIAlignRotated supports align == true, i.e., continuous coordinate
+ // by default, thus the 0.5 offset
+ T offset = (T)0.5;
+ T roi_center_w = current_roi[1] * spatial_scale - offset;
+ T roi_center_h = current_roi[2] * spatial_scale - offset;
+ T roi_width = current_roi[3] * spatial_scale;
+ T roi_height = current_roi[4] * spatial_scale;
+ T theta = current_roi[5] * M_PI / 180.0;
+ T cos_theta = cos(theta);
+ T sin_theta = sin(theta);
+
+ AT_ASSERTM(
+ roi_width >= 0 && roi_height >= 0,
+ "ROIs in ROIAlignRotated do not have non-negative size!");
+
+ T bin_size_h = static_cast(roi_height) / static_cast(pooled_height);
+ T bin_size_w = static_cast(roi_width) / static_cast(pooled_width);
+
+ // We use roi_bin_grid to sample the grid and mimic integral
+ int roi_bin_grid_h = (sampling_ratio > 0)
+ ? sampling_ratio
+ : ceil(roi_height / pooled_height); // e.g., = 2
+ int roi_bin_grid_w =
+ (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+ // We do average (integral) pooling inside a bin
+ const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
+
+ // we want to precalculate indices and weights shared by all channels,
+ // this is the key point of optimization
+ std::vector> pre_calc(
+ roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
+
+ // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+ // Appropriate translation needs to be applied after.
+ T roi_start_h = -roi_height / 2.0;
+ T roi_start_w = -roi_width / 2.0;
+
+ pre_calc_for_bilinear_interpolate(
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ roi_bin_grid_h,
+ roi_bin_grid_w,
+ roi_start_h,
+ roi_start_w,
+ bin_size_h,
+ bin_size_w,
+ roi_bin_grid_h,
+ roi_bin_grid_w,
+ roi_center_h,
+ roi_center_w,
+ cos_theta,
+ sin_theta,
+ pre_calc);
+
+ for (int c = 0; c < channels; c++) {
+ int index_n_c = index_n + c * pooled_width * pooled_height;
+ const T* offset_input =
+ input + (roi_batch_ind * channels + c) * height * width;
+ int pre_calc_index = 0;
+
+ for (int ph = 0; ph < pooled_height; ph++) {
+ for (int pw = 0; pw < pooled_width; pw++) {
+ int index = index_n_c + ph * pooled_width + pw;
+
+ T output_val = 0.;
+ for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+ for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+ PreCalc pc = pre_calc[pre_calc_index];
+ output_val += pc.w1 * offset_input[pc.pos1] +
+ pc.w2 * offset_input[pc.pos2] +
+ pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4];
+
+ pre_calc_index += 1;
+ }
+ }
+ output_val /= count;
+
+ output[index] = output_val;
+ } // for pw
+ } // for ph
+ } // for c
+ } // for n
+}
+
+template
+void ROIAlignRotatedBackward(
+ const int nthreads,
+ // may not be contiguous. should index using n_stride, etc
+ const T* grad_output,
+ const T& spatial_scale,
+ const int channels,
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio,
+ T* grad_input,
+ const T* rois,
+ const int n_stride,
+ const int c_stride,
+ const int h_stride,
+ const int w_stride) {
+ for (int index = 0; index < nthreads; index++) {
+ // (n, c, ph, pw) is an element in the pooled output
+ int pw = index % pooled_width;
+ int ph = (index / pooled_width) % pooled_height;
+ int c = (index / pooled_width / pooled_height) % channels;
+ int n = index / pooled_width / pooled_height / channels;
+
+ const T* current_roi = rois + n * 6;
+ int roi_batch_ind = current_roi[0];
+
+ // Do not use rounding; this implementation detail is critical
+ // ROIAlignRotated supports align == true, i.e., continuous coordinate
+ // by default, thus the 0.5 offset
+ T offset = (T)0.5;
+ T roi_center_w = current_roi[1] * spatial_scale - offset;
+ T roi_center_h = current_roi[2] * spatial_scale - offset;
+ T roi_width = current_roi[3] * spatial_scale;
+ T roi_height = current_roi[4] * spatial_scale;
+ T theta = current_roi[5] * M_PI / 180.0;
+ T cos_theta = cos(theta);
+ T sin_theta = sin(theta);
+
+ AT_ASSERTM(
+ roi_width >= 0 && roi_height >= 0,
+ "ROIs in ROIAlignRotated do not have non-negative size!");
+
+ T bin_size_h = static_cast(roi_height) / static_cast(pooled_height);
+ T bin_size_w = static_cast(roi_width) / static_cast(pooled_width);
+
+ T* offset_grad_input =
+ grad_input + ((roi_batch_ind * channels + c) * height * width);
+
+ int output_offset = n * n_stride + c * c_stride;
+ const T* offset_grad_output = grad_output + output_offset;
+ const T grad_output_this_bin =
+ offset_grad_output[ph * h_stride + pw * w_stride];
+
+ // We use roi_bin_grid to sample the grid and mimic integral
+ int roi_bin_grid_h = (sampling_ratio > 0)
+ ? sampling_ratio
+ : ceil(roi_height / pooled_height); // e.g., = 2
+ int roi_bin_grid_w =
+ (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+ // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+ // Appropriate translation needs to be applied after.
+ T roi_start_h = -roi_height / 2.0;
+ T roi_start_w = -roi_width / 2.0;
+
+ // We do average (integral) pooling inside a bin
+ const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
+
+ for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+ const T yy = roi_start_h + ph * bin_size_h +
+ static_cast(iy + .5f) * bin_size_h /
+ static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5
+ for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+ const T xx = roi_start_w + pw * bin_size_w +
+ static_cast(ix + .5f) * bin_size_w /
+ static_cast(roi_bin_grid_w);
+
+ // Rotate by theta around the center and translate
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+ T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+
+ T w1, w2, w3, w4;
+ int x_low, x_high, y_low, y_high;
+
+ bilinear_interpolate_gradient(
+ height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
+
+ T g1 = grad_output_this_bin * w1 / count;
+ T g2 = grad_output_this_bin * w2 / count;
+ T g3 = grad_output_this_bin * w3 / count;
+ T g4 = grad_output_this_bin * w4 / count;
+
+ if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
+ // atomic add is not needed for now since it is single threaded
+ add(offset_grad_input + y_low * width + x_low, static_cast(g1));
+ add(offset_grad_input + y_low * width + x_high, static_cast(g2));
+ add(offset_grad_input + y_high * width + x_low, static_cast(g3));
+ add(offset_grad_input + y_high * width + x_high, static_cast(g4));
+ } // if
+ } // ix
+ } // iy
+ } // for
+} // ROIAlignRotatedBackward
+
+at::Tensor ROIAlignRotated_forward_cpu(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio) {
+ AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor");
+ AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
+
+ at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
+
+ at::CheckedFrom c = "ROIAlign_forward_cpu";
+ at::checkAllSameType(c, {input_t, rois_t});
+
+ auto num_rois = rois.size(0);
+ auto channels = input.size(1);
+ auto height = input.size(2);
+ auto width = input.size(3);
+
+ at::Tensor output = at::zeros(
+ {num_rois, channels, pooled_height, pooled_width}, input.options());
+
+ auto output_size = num_rois * pooled_height * pooled_width * channels;
+
+ if (output.numel() == 0) {
+ return output;
+ }
+
+ auto input_ = input.contiguous(), rois_ = rois.contiguous();
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+ input.scalar_type(), "ROIAlignRotated_forward", [&] {
+ ROIAlignRotatedForward(
+ output_size,
+ input_.data_ptr(),
+ spatial_scale,
+ channels,
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ sampling_ratio,
+ rois_.data_ptr(),
+ output.data_ptr());
+ });
+ return output;
+}
+
+at::Tensor ROIAlignRotated_backward_cpu(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int batch_size,
+ const int channels,
+ const int height,
+ const int width,
+ const int sampling_ratio) {
+ AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor");
+ AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
+
+ at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
+
+ at::CheckedFrom c = "ROIAlignRotated_backward_cpu";
+ at::checkAllSameType(c, {grad_t, rois_t});
+
+ at::Tensor grad_input =
+ at::zeros({batch_size, channels, height, width}, grad.options());
+
+ // handle possibly empty gradients
+ if (grad.numel() == 0) {
+ return grad_input;
+ }
+
+ // get stride values to ensure indexing into gradients is correct.
+ int n_stride = grad.stride(0);
+ int c_stride = grad.stride(1);
+ int h_stride = grad.stride(2);
+ int w_stride = grad.stride(3);
+
+ auto rois_ = rois.contiguous();
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+ grad.scalar_type(), "ROIAlignRotated_forward", [&] {
+ ROIAlignRotatedBackward(
+ grad.numel(),
+ grad.data_ptr(),
+ spatial_scale,
+ channels,
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ sampling_ratio,
+ grad_input.data_ptr(),
+ rois_.data_ptr(),
+ n_stride,
+ c_stride,
+ h_stride,
+ w_stride);
+ });
+ return grad_input;
+}
+
+} // namespace detectron2
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..fca186519143b168a912c880a4cf495a0a5a9322
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu
@@ -0,0 +1,443 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include
+#include
+#include
+#include
+
+// TODO make it in a common file
+#define CUDA_1D_KERNEL_LOOP(i, n) \
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
+ i += blockDim.x * gridDim.x)
+
+// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
+// and PyTorch ROIAlign (non-rotated) Op implementations.
+// The key difference between this implementation and those ones is
+// we don't do "legacy offset" in this version, as there aren't many previous
+// works, if any, using the "legacy" ROIAlignRotated Op.
+// This would make the interface a bit cleaner.
+
+namespace detectron2 {
+
+namespace {
+
+template
+__device__ T bilinear_interpolate(
+ const T* input,
+ const int height,
+ const int width,
+ T y,
+ T x) {
+ // deal with cases that inverse elements are out of feature map boundary
+ if (y < -1.0 || y > height || x < -1.0 || x > width) {
+ // empty
+ return 0;
+ }
+
+ if (y < 0) {
+ y = 0;
+ }
+
+ if (x < 0) {
+ x = 0;
+ }
+
+ int y_low = (int)y;
+ int x_low = (int)x;
+ int y_high;
+ int x_high;
+
+ if (y_low >= height - 1) {
+ y_high = y_low = height - 1;
+ y = (T)y_low;
+ } else {
+ y_high = y_low + 1;
+ }
+
+ if (x_low >= width - 1) {
+ x_high = x_low = width - 1;
+ x = (T)x_low;
+ } else {
+ x_high = x_low + 1;
+ }
+
+ T ly = y - y_low;
+ T lx = x - x_low;
+ T hy = 1. - ly, hx = 1. - lx;
+ // do bilinear interpolation
+ T v1 = input[y_low * width + x_low];
+ T v2 = input[y_low * width + x_high];
+ T v3 = input[y_high * width + x_low];
+ T v4 = input[y_high * width + x_high];
+ T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+ T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+ return val;
+}
+
+template
+__device__ void bilinear_interpolate_gradient(
+ const int height,
+ const int width,
+ T y,
+ T x,
+ T& w1,
+ T& w2,
+ T& w3,
+ T& w4,
+ int& x_low,
+ int& x_high,
+ int& y_low,
+ int& y_high) {
+ // deal with cases that inverse elements are out of feature map boundary
+ if (y < -1.0 || y > height || x < -1.0 || x > width) {
+ // empty
+ w1 = w2 = w3 = w4 = 0.;
+ x_low = x_high = y_low = y_high = -1;
+ return;
+ }
+
+ if (y < 0) {
+ y = 0;
+ }
+
+ if (x < 0) {
+ x = 0;
+ }
+
+ y_low = (int)y;
+ x_low = (int)x;
+
+ if (y_low >= height - 1) {
+ y_high = y_low = height - 1;
+ y = (T)y_low;
+ } else {
+ y_high = y_low + 1;
+ }
+
+ if (x_low >= width - 1) {
+ x_high = x_low = width - 1;
+ x = (T)x_low;
+ } else {
+ x_high = x_low + 1;
+ }
+
+ T ly = y - y_low;
+ T lx = x - x_low;
+ T hy = 1. - ly, hx = 1. - lx;
+
+ // reference in forward
+ // T v1 = input[y_low * width + x_low];
+ // T v2 = input[y_low * width + x_high];
+ // T v3 = input[y_high * width + x_low];
+ // T v4 = input[y_high * width + x_high];
+ // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+ w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+ return;
+}
+
+} // namespace
+
+template
+__global__ void RoIAlignRotatedForward(
+ const int nthreads,
+ const T* input,
+ const T spatial_scale,
+ const int channels,
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio,
+ const T* rois,
+ T* top_data) {
+ CUDA_1D_KERNEL_LOOP(index, nthreads) {
+ // (n, c, ph, pw) is an element in the pooled output
+ int pw = index % pooled_width;
+ int ph = (index / pooled_width) % pooled_height;
+ int c = (index / pooled_width / pooled_height) % channels;
+ int n = index / pooled_width / pooled_height / channels;
+
+ const T* current_roi = rois + n * 6;
+ int roi_batch_ind = current_roi[0];
+
+ // Do not use rounding; this implementation detail is critical
+ // ROIAlignRotated supports align == true, i.e., continuous coordinate
+ // by default, thus the 0.5 offset
+ T offset = (T)0.5;
+ T roi_center_w = current_roi[1] * spatial_scale - offset;
+ T roi_center_h = current_roi[2] * spatial_scale - offset;
+ T roi_width = current_roi[3] * spatial_scale;
+ T roi_height = current_roi[4] * spatial_scale;
+ T theta = current_roi[5] * M_PI / 180.0;
+ T cos_theta = cos(theta);
+ T sin_theta = sin(theta);
+
+ T bin_size_h = static_cast(roi_height) / static_cast(pooled_height);
+ T bin_size_w = static_cast(roi_width) / static_cast(pooled_width);
+
+ const T* offset_input =
+ input + (roi_batch_ind * channels + c) * height * width;
+
+ // We use roi_bin_grid to sample the grid and mimic integral
+ int roi_bin_grid_h = (sampling_ratio > 0)
+ ? sampling_ratio
+ : ceil(roi_height / pooled_height); // e.g., = 2
+ int roi_bin_grid_w =
+ (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+ // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+ // Appropriate translation needs to be applied after.
+ T roi_start_h = -roi_height / 2.0;
+ T roi_start_w = -roi_width / 2.0;
+
+ // We do average (inte gral) pooling inside a bin
+ const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
+
+ T output_val = 0.;
+ for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
+ {
+ const T yy = roi_start_h + ph * bin_size_h +
+ static_cast(iy + .5f) * bin_size_h /
+ static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5
+ for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+ const T xx = roi_start_w + pw * bin_size_w +
+ static_cast(ix + .5f) * bin_size_w /
+ static_cast(roi_bin_grid_w);
+
+ // Rotate by theta around the center and translate
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+ T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+
+ T val = bilinear_interpolate(offset_input, height, width, y, x);
+ output_val += val;
+ }
+ }
+ output_val /= count;
+
+ top_data[index] = output_val;
+ }
+}
+
+template
+__global__ void RoIAlignRotatedBackwardFeature(
+ const int nthreads,
+ const T* top_diff,
+ const int num_rois,
+ const T spatial_scale,
+ const int channels,
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio,
+ T* bottom_diff,
+ const T* rois) {
+ CUDA_1D_KERNEL_LOOP(index, nthreads) {
+ // (n, c, ph, pw) is an element in the pooled output
+ int pw = index % pooled_width;
+ int ph = (index / pooled_width) % pooled_height;
+ int c = (index / pooled_width / pooled_height) % channels;
+ int n = index / pooled_width / pooled_height / channels;
+
+ const T* current_roi = rois + n * 6;
+ int roi_batch_ind = current_roi[0];
+
+ // Do not use rounding; this implementation detail is critical
+ // ROIAlignRotated supports align == true, i.e., continuous coordinate
+ // by default, thus the 0.5 offset
+ T offset = (T)0.5;
+ T roi_center_w = current_roi[1] * spatial_scale - offset;
+ T roi_center_h = current_roi[2] * spatial_scale - offset;
+ T roi_width = current_roi[3] * spatial_scale;
+ T roi_height = current_roi[4] * spatial_scale;
+ T theta = current_roi[5] * M_PI / 180.0;
+ T cos_theta = cos(theta);
+ T sin_theta = sin(theta);
+
+ T bin_size_h = static_cast(roi_height) / static_cast(pooled_height);
+ T bin_size_w = static_cast(roi_width) / static_cast(pooled_width);
+
+ T* offset_bottom_diff =
+ bottom_diff + (roi_batch_ind * channels + c) * height * width;
+
+ int top_offset = (n * channels + c) * pooled_height * pooled_width;
+ const T* offset_top_diff = top_diff + top_offset;
+ const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
+
+ // We use roi_bin_grid to sample the grid and mimic integral
+ int roi_bin_grid_h = (sampling_ratio > 0)
+ ? sampling_ratio
+ : ceil(roi_height / pooled_height); // e.g., = 2
+ int roi_bin_grid_w =
+ (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+ // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+ // Appropriate translation needs to be applied after.
+ T roi_start_h = -roi_height / 2.0;
+ T roi_start_w = -roi_width / 2.0;
+
+ // We do average (integral) pooling inside a bin
+ const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
+
+ for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
+ {
+ const T yy = roi_start_h + ph * bin_size_h +
+ static_cast(iy + .5f) * bin_size_h /
+ static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5
+ for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+ const T xx = roi_start_w + pw * bin_size_w +
+ static_cast(ix + .5f) * bin_size_w /
+ static_cast(roi_bin_grid_w);
+
+ // Rotate by theta around the center and translate
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+ T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+
+ T w1, w2, w3, w4;
+ int x_low, x_high, y_low, y_high;
+
+ bilinear_interpolate_gradient(
+ height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
+
+ T g1 = top_diff_this_bin * w1 / count;
+ T g2 = top_diff_this_bin * w2 / count;
+ T g3 = top_diff_this_bin * w3 / count;
+ T g4 = top_diff_this_bin * w4 / count;
+
+ if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
+ atomicAdd(
+ offset_bottom_diff + y_low * width + x_low, static_cast(g1));
+ atomicAdd(
+ offset_bottom_diff + y_low * width + x_high, static_cast(g2));
+ atomicAdd(
+ offset_bottom_diff + y_high * width + x_low, static_cast(g3));
+ atomicAdd(
+ offset_bottom_diff + y_high * width + x_high, static_cast(g4));
+ } // if
+ } // ix
+ } // iy
+ } // CUDA_1D_KERNEL_LOOP
+} // RoIAlignRotatedBackward
+
+at::Tensor ROIAlignRotated_forward_cuda(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio) {
+ AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
+ AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+ at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
+
+ at::CheckedFrom c = "ROIAlignRotated_forward_cuda";
+ at::checkAllSameGPU(c, {input_t, rois_t});
+ at::checkAllSameType(c, {input_t, rois_t});
+ at::cuda::CUDAGuard device_guard(input.device());
+
+ auto num_rois = rois.size(0);
+ auto channels = input.size(1);
+ auto height = input.size(2);
+ auto width = input.size(3);
+
+ auto output = at::empty(
+ {num_rois, channels, pooled_height, pooled_width}, input.options());
+ auto output_size = num_rois * pooled_height * pooled_width * channels;
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+ dim3 grid(std::min(
+ at::cuda::ATenCeilDiv(
+ static_cast(output_size), static_cast(512)),
+ static_cast(4096)));
+ dim3 block(512);
+
+ if (output.numel() == 0) {
+ AT_CUDA_CHECK(cudaGetLastError());
+ return output;
+ }
+
+ auto input_ = input.contiguous(), rois_ = rois.contiguous();
+ AT_DISPATCH_FLOATING_TYPES(
+ input.scalar_type(), "ROIAlignRotated_forward", [&] {
+ RoIAlignRotatedForward<<>>(
+ output_size,
+ input_.data_ptr(),
+ spatial_scale,
+ channels,
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ sampling_ratio,
+ rois_.data_ptr(),
+ output.data_ptr());
+ });
+ cudaDeviceSynchronize();
+ AT_CUDA_CHECK(cudaGetLastError());
+ return output;
+}
+
+// TODO remove the dependency on input and use instead its sizes -> save memory
+at::Tensor ROIAlignRotated_backward_cuda(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int batch_size,
+ const int channels,
+ const int height,
+ const int width,
+ const int sampling_ratio) {
+ AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
+ AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+
+ at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
+ at::CheckedFrom c = "ROIAlign_backward_cuda";
+ at::checkAllSameGPU(c, {grad_t, rois_t});
+ at::checkAllSameType(c, {grad_t, rois_t});
+ at::cuda::CUDAGuard device_guard(grad.device());
+
+ auto num_rois = rois.size(0);
+ auto grad_input =
+ at::zeros({batch_size, channels, height, width}, grad.options());
+
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+ dim3 grid(std::min(
+ at::cuda::ATenCeilDiv(
+ static_cast(grad.numel()), static_cast(512)),
+ static_cast(4096)));
+ dim3 block(512);
+
+ // handle possibly empty gradients
+ if (grad.numel() == 0) {
+ AT_CUDA_CHECK(cudaGetLastError());
+ return grad_input;
+ }
+
+ auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
+ AT_DISPATCH_FLOATING_TYPES(
+ grad.scalar_type(), "ROIAlignRotated_backward", [&] {
+ RoIAlignRotatedBackwardFeature<<>>(
+ grad.numel(),
+ grad_.data_ptr(),
+ num_rois,
+ spatial_scale,
+ channels,
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ sampling_ratio,
+ grad_input.data_ptr(),
+ rois_.data_ptr());
+ });
+ AT_CUDA_CHECK(cudaGetLastError());
+ return grad_input;
+}
+
+} // namespace detectron2
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h
new file mode 100644
index 0000000000000000000000000000000000000000..3bf383b8ed9b358b5313d433a9682c294dfb77e4
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h
@@ -0,0 +1,35 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+#include
+
+namespace detectron2 {
+
+at::Tensor box_iou_rotated_cpu(
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2);
+
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+at::Tensor box_iou_rotated_cuda(
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2);
+#endif
+
+// Interface for Python
+// inline is needed to prevent multiple function definitions when this header is
+// included by different cpps
+inline at::Tensor box_iou_rotated(
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2) {
+ assert(boxes1.device().is_cuda() == boxes2.device().is_cuda());
+ if (boxes1.device().is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+ return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous());
+#else
+ AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+ }
+
+ return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous());
+}
+
+} // namespace detectron2
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c843487b5fa4e8077dd27402ec99009266ddda8d
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp
@@ -0,0 +1,39 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include "box_iou_rotated.h"
+#include "box_iou_rotated_utils.h"
+
+namespace detectron2 {
+
+template
+void box_iou_rotated_cpu_kernel(
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2,
+ at::Tensor& ious) {
+ auto num_boxes1 = boxes1.size(0);
+ auto num_boxes2 = boxes2.size(0);
+
+ for (int i = 0; i < num_boxes1; i++) {
+ for (int j = 0; j < num_boxes2; j++) {
+ ious[i * num_boxes2 + j] = single_box_iou_rotated(
+ boxes1[i].data_ptr(), boxes2[j].data_ptr());
+ }
+ }
+}
+
+at::Tensor box_iou_rotated_cpu(
+ // input must be contiguous:
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2) {
+ auto num_boxes1 = boxes1.size(0);
+ auto num_boxes2 = boxes2.size(0);
+ at::Tensor ious =
+ at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
+
+ box_iou_rotated_cpu_kernel(boxes1, boxes2, ious);
+
+ // reshape from 1d array to 2d array
+ auto shape = std::vector{num_boxes1, num_boxes2};
+ return ious.reshape(shape);
+}
+
+} // namespace detectron2
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..952710e53041187907fbd113f8d0d0fa24134a86
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
@@ -0,0 +1,130 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include
+#include
+#include
+#include
+#include "box_iou_rotated_utils.h"
+
+namespace detectron2 {
+
+// 2D block with 32 * 16 = 512 threads per block
+const int BLOCK_DIM_X = 32;
+const int BLOCK_DIM_Y = 16;
+
+template
+__global__ void box_iou_rotated_cuda_kernel(
+ const int n_boxes1,
+ const int n_boxes2,
+ const T* dev_boxes1,
+ const T* dev_boxes2,
+ T* dev_ious) {
+ const int row_start = blockIdx.x * blockDim.x;
+ const int col_start = blockIdx.y * blockDim.y;
+
+ const int row_size = min(n_boxes1 - row_start, blockDim.x);
+ const int col_size = min(n_boxes2 - col_start, blockDim.y);
+
+ __shared__ float block_boxes1[BLOCK_DIM_X * 5];
+ __shared__ float block_boxes2[BLOCK_DIM_Y * 5];
+
+ // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
+ if (threadIdx.x < row_size && threadIdx.y == 0) {
+ block_boxes1[threadIdx.x * 5 + 0] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 0];
+ block_boxes1[threadIdx.x * 5 + 1] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 1];
+ block_boxes1[threadIdx.x * 5 + 2] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 2];
+ block_boxes1[threadIdx.x * 5 + 3] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 3];
+ block_boxes1[threadIdx.x * 5 + 4] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 4];
+ }
+
+ if (threadIdx.x < col_size && threadIdx.y == 0) {
+ block_boxes2[threadIdx.x * 5 + 0] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 0];
+ block_boxes2[threadIdx.x * 5 + 1] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 1];
+ block_boxes2[threadIdx.x * 5 + 2] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 2];
+ block_boxes2[threadIdx.x * 5 + 3] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 3];
+ block_boxes2[threadIdx.x * 5 + 4] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 4];
+ }
+ __syncthreads();
+
+ if (threadIdx.x < row_size && threadIdx.y < col_size) {
+ int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y;
+ dev_ious[offset] = single_box_iou_rotated(
+ block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5);
+ }
+}
+
+at::Tensor box_iou_rotated_cuda(
+ // input must be contiguous
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2) {
+ using scalar_t = float;
+ AT_ASSERTM(
+ boxes1.scalar_type() == at::kFloat, "boxes1 must be a float tensor");
+ AT_ASSERTM(
+ boxes2.scalar_type() == at::kFloat, "boxes2 must be a float tensor");
+ AT_ASSERTM(boxes1.is_cuda(), "boxes1 must be a CUDA tensor");
+ AT_ASSERTM(boxes2.is_cuda(), "boxes2 must be a CUDA tensor");
+ at::cuda::CUDAGuard device_guard(boxes1.device());
+
+ auto num_boxes1 = boxes1.size(0);
+ auto num_boxes2 = boxes2.size(0);
+
+ at::Tensor ious =
+ at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
+
+ bool transpose = false;
+ if (num_boxes1 > 0 && num_boxes2 > 0) {
+ scalar_t *data1 = boxes1.data_ptr(),
+ *data2 = boxes2.data_ptr();
+
+ if (num_boxes2 > 65535 * BLOCK_DIM_Y) {
+ AT_ASSERTM(
+ num_boxes1 <= 65535 * BLOCK_DIM_Y,
+ "Too many boxes for box_iou_rotated_cuda!");
+ // x dim is allowed to be large, but y dim cannot,
+ // so we transpose the two to avoid "invalid configuration argument"
+ // error. We assume one of them is small. Otherwise the result is hard to
+ // fit in memory anyway.
+ std::swap(num_boxes1, num_boxes2);
+ std::swap(data1, data2);
+ transpose = true;
+ }
+
+ const int blocks_x =
+ at::cuda::ATenCeilDiv(static_cast(num_boxes1), BLOCK_DIM_X);
+ const int blocks_y =
+ at::cuda::ATenCeilDiv(static_cast(num_boxes2), BLOCK_DIM_Y);
+
+ dim3 blocks(blocks_x, blocks_y);
+ dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+ box_iou_rotated_cuda_kernel<<>>(
+ num_boxes1,
+ num_boxes2,
+ data1,
+ data2,
+ (scalar_t*)ious.data_ptr());
+
+ AT_CUDA_CHECK(cudaGetLastError());
+ }
+
+ // reshape from 1d array to 2d array
+ auto shape = std::vector{num_boxes1, num_boxes2};
+ if (transpose) {
+ return ious.view(shape).t();
+ } else {
+ return ious.view(shape);
+ }
+}
+
+} // namespace detectron2
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..b54a5dde2ca11a74d29c4d8adb7fe1634f5baf9c
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h
@@ -0,0 +1,370 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+
+#include
+#include
+
+#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1
+// Designates functions callable from the host (CPU) and the device (GPU)
+#define HOST_DEVICE __host__ __device__
+#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__
+#else
+#include
+#define HOST_DEVICE
+#define HOST_DEVICE_INLINE HOST_DEVICE inline
+#endif
+
+namespace detectron2 {
+
+namespace {
+
+template
+struct RotatedBox {
+ T x_ctr, y_ctr, w, h, a;
+};
+
+template
+struct Point {
+ T x, y;
+ HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {}
+ HOST_DEVICE_INLINE Point operator+(const Point& p) const {
+ return Point(x + p.x, y + p.y);
+ }
+ HOST_DEVICE_INLINE Point& operator+=(const Point& p) {
+ x += p.x;
+ y += p.y;
+ return *this;
+ }
+ HOST_DEVICE_INLINE Point operator-(const Point& p) const {
+ return Point(x - p.x, y - p.y);
+ }
+ HOST_DEVICE_INLINE Point operator*(const T coeff) const {
+ return Point(x * coeff, y * coeff);
+ }
+};
+
+template
+HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) {
+ return A.x * B.x + A.y * B.y;
+}
+
+// R: result type. can be different from input type
+template
+HOST_DEVICE_INLINE R cross_2d(const Point& A, const Point& B) {
+ return static_cast(A.x) * static_cast(B.y) -
+ static_cast(B.x) * static_cast(A.y);
+}
+
+template
+HOST_DEVICE_INLINE void get_rotated_vertices(
+ const RotatedBox& box,
+ Point (&pts)[4]) {
+ // M_PI / 180. == 0.01745329251
+ double theta = box.a * 0.01745329251;
+ T cosTheta2 = (T)cos(theta) * 0.5f;
+ T sinTheta2 = (T)sin(theta) * 0.5f;
+
+ // y: top --> down; x: left --> right
+ pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w;
+ pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w;
+ pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w;
+ pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w;
+ pts[2].x = 2 * box.x_ctr - pts[0].x;
+ pts[2].y = 2 * box.y_ctr - pts[0].y;
+ pts[3].x = 2 * box.x_ctr - pts[1].x;
+ pts[3].y = 2 * box.y_ctr - pts[1].y;
+}
+
+template
+HOST_DEVICE_INLINE int get_intersection_points(
+ const Point (&pts1)[4],
+ const Point (&pts2)[4],
+ Point (&intersections)[24]) {
+ // Line vector
+ // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1]
+ Point vec1[4], vec2[4];
+ for (int i = 0; i < 4; i++) {
+ vec1[i] = pts1[(i + 1) % 4] - pts1[i];
+ vec2[i] = pts2[(i + 1) % 4] - pts2[i];
+ }
+
+ // When computing the intersection area, it doesn't hurt if we have
+ // more (duplicated/approximate) intersections/vertices than needed,
+ // while it can cause drastic difference if we miss an intersection/vertex.
+ // Therefore, we add an epsilon to relax the comparisons between
+ // the float point numbers that decide the intersection points.
+ double EPS = 1e-5;
+
+ // Line test - test all line combos for intersection
+ int num = 0; // number of intersections
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ // Solve for 2x2 Ax=b
+ T det = cross_2d(vec2[j], vec1[i]);
+
+ // This takes care of parallel lines
+ if (fabs(det) <= 1e-14) {
+ continue;
+ }
+
+ auto vec12 = pts2[j] - pts1[i];
+
+ T t1 = cross_2d(vec2[j], vec12) / det;
+ T t2 = cross_2d(vec1[i], vec12) / det;
+
+ if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) {
+ intersections[num++] = pts1[i] + vec1[i] * t1;
+ }
+ }
+ }
+
+ // Check for vertices of rect1 inside rect2
+ {
+ const auto& AB = vec2[0];
+ const auto& DA = vec2[3];
+ auto ABdotAB = dot_2d(AB, AB);
+ auto ADdotAD = dot_2d(DA, DA);
+ for (int i = 0; i < 4; i++) {
+ // assume ABCD is the rectangle, and P is the point to be judged
+ // P is inside ABCD iff. P's projection on AB lies within AB
+ // and P's projection on AD lies within AD
+
+ auto AP = pts1[i] - pts2[0];
+
+ auto APdotAB = dot_2d(AP, AB);
+ auto APdotAD = -dot_2d(AP, DA);
+
+ if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
+ (APdotAD < ADdotAD + EPS)) {
+ intersections[num++] = pts1[i];
+ }
+ }
+ }
+
+ // Reverse the check - check for vertices of rect2 inside rect1
+ {
+ const auto& AB = vec1[0];
+ const auto& DA = vec1[3];
+ auto ABdotAB = dot_2d(AB, AB);
+ auto ADdotAD = dot_2d(DA, DA);
+ for (int i = 0; i < 4; i++) {
+ auto AP = pts2[i] - pts1[0];
+
+ auto APdotAB = dot_2d(AP, AB);
+ auto APdotAD = -dot_2d(AP, DA);
+
+ if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
+ (APdotAD < ADdotAD + EPS)) {
+ intersections[num++] = pts2[i];
+ }
+ }
+ }
+
+ return num;
+}
+
+template
+HOST_DEVICE_INLINE int convex_hull_graham(
+ const Point (&p)[24],
+ const int& num_in,
+ Point (&q)[24],
+ bool shift_to_zero = false) {
+ assert(num_in >= 2);
+
+ // Step 1:
+ // Find point with minimum y
+ // if more than 1 points have the same minimum y,
+ // pick the one with the minimum x.
+ int t = 0;
+ for (int i = 1; i < num_in; i++) {
+ if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) {
+ t = i;
+ }
+ }
+ auto& start = p[t]; // starting point
+
+ // Step 2:
+ // Subtract starting point from every points (for sorting in the next step)
+ for (int i = 0; i < num_in; i++) {
+ q[i] = p[i] - start;
+ }
+
+ // Swap the starting point to position 0
+ auto tmp = q[0];
+ q[0] = q[t];
+ q[t] = tmp;
+
+ // Step 3:
+ // Sort point 1 ~ num_in according to their relative cross-product values
+ // (essentially sorting according to angles)
+ // If the angles are the same, sort according to their distance to origin
+ T dist[24];
+#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1
+ // compute distance to origin before sort, and sort them together with the
+ // points
+ for (int i = 0; i < num_in; i++) {
+ dist[i] = dot_2d(q[i], q[i]);
+ }
+
+ // CUDA version
+ // In the future, we can potentially use thrust
+ // for sorting here to improve speed (though not guaranteed)
+ for (int i = 1; i < num_in - 1; i++) {
+ for (int j = i + 1; j < num_in; j++) {
+ T crossProduct = cross_2d(q[i], q[j]);
+ if ((crossProduct < -1e-6) ||
+ (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) {
+ auto q_tmp = q[i];
+ q[i] = q[j];
+ q[j] = q_tmp;
+ auto dist_tmp = dist[i];
+ dist[i] = dist[j];
+ dist[j] = dist_tmp;
+ }
+ }
+ }
+#else
+ // CPU version
+ std::sort(
+ q + 1, q + num_in, [](const Point& A, const Point& B) -> bool {
+ T temp = cross_2d(A, B);
+ if (fabs(temp) < 1e-6) {
+ return dot_2d(A, A) < dot_2d(B, B);
+ } else {
+ return temp > 0;
+ }
+ });
+ // compute distance to origin after sort, since the points are now different.
+ for (int i = 0; i < num_in; i++) {
+ dist[i] = dot_2d(q[i], q[i]);
+ }
+#endif
+
+ // Step 4:
+ // Make sure there are at least 2 points (that don't overlap with each other)
+ // in the stack
+ int k; // index of the non-overlapped second point
+ for (k = 1; k < num_in; k++) {
+ if (dist[k] > 1e-8) {
+ break;
+ }
+ }
+ if (k == num_in) {
+ // We reach the end, which means the convex hull is just one point
+ q[0] = p[t];
+ return 1;
+ }
+ q[1] = q[k];
+ int m = 2; // 2 points in the stack
+ // Step 5:
+ // Finally we can start the scanning process.
+ // When a non-convex relationship between the 3 points is found
+ // (either concave shape or duplicated points),
+ // we pop the previous point from the stack
+ // until the 3-point relationship is convex again, or
+ // until the stack only contains two points
+ for (int i = k + 1; i < num_in; i++) {
+ while (m > 1) {
+ auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2];
+ // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) -
+ // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we
+ // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means
+ // round to nearest floating point).
+ if (q1.x * q2.y >= q2.x * q1.y)
+ m--;
+ else
+ break;
+ }
+ // Using double also helps, but float can solve the issue for now.
+ // while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2])
+ // >= 0) {
+ // m--;
+ // }
+ q[m++] = q[i];
+ }
+
+ // Step 6 (Optional):
+ // In general sense we need the original coordinates, so we
+ // need to shift the points back (reverting Step 2)
+ // But if we're only interested in getting the area/perimeter of the shape
+ // We can simply return.
+ if (!shift_to_zero) {
+ for (int i = 0; i < m; i++) {
+ q[i] += start;
+ }
+ }
+
+ return m;
+}
+
+template
+HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) {
+ if (m <= 2) {
+ return 0;
+ }
+
+ T area = 0;
+ for (int i = 1; i < m - 1; i++) {
+ area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0]));
+ }
+
+ return area / 2.0;
+}
+
+template
+HOST_DEVICE_INLINE T rotated_boxes_intersection(
+ const RotatedBox& box1,
+ const RotatedBox& box2) {
+ // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned
+ // from rotated_rect_intersection_pts
+ Point intersectPts[24], orderedPts[24];
+
+ Point pts1[4];
+ Point pts2[4];
+ get_rotated_vertices(box1, pts1);
+ get_rotated_vertices(box2, pts2);
+
+ int num = get_intersection_points(pts1, pts2, intersectPts);
+
+ if (num <= 2) {
+ return 0.0;
+ }
+
+ // Convex Hull to order the intersection points in clockwise order and find
+ // the contour area.
+ int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true);
+ return polygon_area(orderedPts, num_convex);
+}
+
+} // namespace
+
+template
+HOST_DEVICE_INLINE T
+single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) {
+ // shift center to the middle point to achieve higher precision in result
+ RotatedBox box1, box2;
+ auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0;
+ auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0;
+ box1.x_ctr = box1_raw[0] - center_shift_x;
+ box1.y_ctr = box1_raw[1] - center_shift_y;
+ box1.w = box1_raw[2];
+ box1.h = box1_raw[3];
+ box1.a = box1_raw[4];
+ box2.x_ctr = box2_raw[0] - center_shift_x;
+ box2.y_ctr = box2_raw[1] - center_shift_y;
+ box2.w = box2_raw[2];
+ box2.h = box2_raw[3];
+ box2.a = box2_raw[4];
+
+ T area1 = box1.w * box1.h;
+ T area2 = box2.w * box2.h;
+ if (area1 < 1e-14 || area2 < 1e-14) {
+ return 0.f;
+ }
+
+ T intersection = rotated_boxes_intersection(box1, box2);
+ T iou = intersection / (area1 + area2 - intersection);
+ return iou;
+}
+
+} // namespace detectron2
diff --git a/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0a5b7b907c06720fefc77b0dfd921b8ec3ecf2be
--- /dev/null
+++ b/model/vision/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp
@@ -0,0 +1,507 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include "cocoeval.h"
+#include
+#include
+#include
+#include
+
+using namespace pybind11::literals;
+
+namespace detectron2 {
+
+namespace COCOeval {
+
+// Sort detections from highest score to lowest, such that
+// detection_instances[detection_sorted_indices[t]] >=
+// detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match
+// original COCO API
+void SortInstancesByDetectionScore(
+ const std::vector& detection_instances,
+ std::vector* detection_sorted_indices) {
+ detection_sorted_indices->resize(detection_instances.size());
+ std::iota(
+ detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
+ std::stable_sort(
+ detection_sorted_indices->begin(),
+ detection_sorted_indices->end(),
+ [&detection_instances](size_t j1, size_t j2) {
+ return detection_instances[j1].score > detection_instances[j2].score;
+ });
+}
+
+// Partition the ground truth objects based on whether or not to ignore them
+// based on area
+void SortInstancesByIgnore(
+ const std::array& area_range,
+ const std::vector& ground_truth_instances,
+ std::vector* ground_truth_sorted_indices,
+ std::vector* ignores) {
+ ignores->clear();
+ ignores->reserve(ground_truth_instances.size());
+ for (auto o : ground_truth_instances) {
+ ignores->push_back(
+ o.ignore || o.area < area_range[0] || o.area > area_range[1]);
+ }
+
+ ground_truth_sorted_indices->resize(ground_truth_instances.size());
+ std::iota(
+ ground_truth_sorted_indices->begin(),
+ ground_truth_sorted_indices->end(),
+ 0);
+ std::stable_sort(
+ ground_truth_sorted_indices->begin(),
+ ground_truth_sorted_indices->end(),
+ [&ignores](size_t j1, size_t j2) {
+ return (int)(*ignores)[j1] < (int)(*ignores)[j2];
+ });
+}
+
+// For each IOU threshold, greedily match each detected instance to a ground
+// truth instance (if possible) and store the results
+void MatchDetectionsToGroundTruth(
+ const std::vector& detection_instances,
+ const std::vector& detection_sorted_indices,
+ const std::vector& ground_truth_instances,
+ const std::vector& ground_truth_sorted_indices,
+ const std::vector& ignores,
+ const std::vector>& ious,
+ const std::vector& iou_thresholds,
+ const std::array& area_range,
+ ImageEvaluation* results) {
+ // Initialize memory to store return data matches and ignore
+ const int num_iou_thresholds = iou_thresholds.size();
+ const int num_ground_truth = ground_truth_sorted_indices.size();
+ const int num_detections = detection_sorted_indices.size();
+ std::vector ground_truth_matches(
+ num_iou_thresholds * num_ground_truth, 0);
+ std::vector& detection_matches = results->detection_matches;
+ std::vector& detection_ignores = results->detection_ignores;
+ std::vector& ground_truth_ignores = results->ground_truth_ignores;
+ detection_matches.resize(num_iou_thresholds * num_detections, 0);
+ detection_ignores.resize(num_iou_thresholds * num_detections, false);
+ ground_truth_ignores.resize(num_ground_truth);
+ for (auto g = 0; g < num_ground_truth; ++g) {
+ ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]];
+ }
+
+ for (auto t = 0; t < num_iou_thresholds; ++t) {
+ for (auto d = 0; d < num_detections; ++d) {
+ // information about best match so far (match=-1 -> unmatched)
+ double best_iou = std::min(iou_thresholds[t], 1 - 1e-10);
+ int match = -1;
+ for (auto g = 0; g < num_ground_truth; ++g) {
+ // if this ground truth instance is already matched and not a
+ // crowd, it cannot be matched to another detection
+ if (ground_truth_matches[t * num_ground_truth + g] > 0 &&
+ !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) {
+ continue;
+ }
+
+ // if detected instance matched to a regular ground truth
+ // instance, we can break on the first ground truth instance
+ // tagged as ignore (because they are sorted by the ignore tag)
+ if (match >= 0 && !ground_truth_ignores[match] &&
+ ground_truth_ignores[g]) {
+ break;
+ }
+
+ // if IOU overlap is the best so far, store the match appropriately
+ if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) {
+ best_iou = ious[d][ground_truth_sorted_indices[g]];
+ match = g;
+ }
+ }
+ // if match was made, store id of match for both detection and
+ // ground truth
+ if (match >= 0) {
+ detection_ignores[t * num_detections + d] = ground_truth_ignores[match];
+ detection_matches[t * num_detections + d] =
+ ground_truth_instances[ground_truth_sorted_indices[match]].id;
+ ground_truth_matches[t * num_ground_truth + match] =
+ detection_instances[detection_sorted_indices[d]].id;
+ }
+
+ // set unmatched detections outside of area range to ignore
+ const InstanceAnnotation& detection =
+ detection_instances[detection_sorted_indices[d]];
+ detection_ignores[t * num_detections + d] =
+ detection_ignores[t * num_detections + d] ||
+ (detection_matches[t * num_detections + d] == 0 &&
+ (detection.area < area_range[0] || detection.area > area_range[1]));
+ }
+ }
+
+ // store detection score results
+ results->detection_scores.resize(detection_sorted_indices.size());
+ for (size_t d = 0; d < detection_sorted_indices.size(); ++d) {
+ results->detection_scores[d] =
+ detection_instances[detection_sorted_indices[d]].score;
+ }
+}
+
+std::vector EvaluateImages(
+ const std::vector>& area_ranges,
+ int max_detections,
+ const std::vector& iou_thresholds,
+ const ImageCategoryInstances>& image_category_ious,
+ const ImageCategoryInstances&
+ image_category_ground_truth_instances,
+ const ImageCategoryInstances&
+ image_category_detection_instances) {
+ const int num_area_ranges = area_ranges.size();
+ const int num_images = image_category_ground_truth_instances.size();
+ const int num_categories =
+ image_category_ious.size() > 0 ? image_category_ious[0].size() : 0;
+ std::vector detection_sorted_indices;
+ std::vector ground_truth_sorted_indices;
+ std::vector ignores;
+ std::vector results_all(
+ num_images * num_area_ranges * num_categories);
+
+ // Store results for each image, category, and area range combination. Results
+ // for each IOU threshold are packed into the same ImageEvaluation object
+ for (auto i = 0; i < num_images; ++i) {
+ for (auto c = 0; c < num_categories; ++c) {
+ const std::vector& ground_truth_instances =
+ image_category_ground_truth_instances[i][c];
+ const std::vector& detection_instances =
+ image_category_detection_instances[i][c];
+
+ SortInstancesByDetectionScore(
+ detection_instances, &detection_sorted_indices);
+ if ((int)detection_sorted_indices.size() > max_detections) {
+ detection_sorted_indices.resize(max_detections);
+ }
+
+ for (size_t a = 0; a < area_ranges.size(); ++a) {
+ SortInstancesByIgnore(
+ area_ranges[a],
+ ground_truth_instances,
+ &ground_truth_sorted_indices,
+ &ignores);
+
+ MatchDetectionsToGroundTruth(
+ detection_instances,
+ detection_sorted_indices,
+ ground_truth_instances,
+ ground_truth_sorted_indices,
+ ignores,
+ image_category_ious[i][c],
+ iou_thresholds,
+ area_ranges[a],
+ &results_all
+ [c * num_area_ranges * num_images + a * num_images + i]);
+ }
+ }
+ }
+
+ return results_all;
+}
+
+// Convert a python list to a vector
+template