Upload folder using huggingface_hub
- .gitattributes +6 -0
- assets/metadata.textproto +3 -0
- ckpt-2378490.data-00000-of-00001 +3 -0
- ckpt-2378490.index +0 -0
- fingerprint.pb +3 -0
- policy_specs.pbtxt +422 -0
- saved_model.pb +3 -0
- variables/variables.data-00000-of-00001 +3 -0
- variables/variables.index +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ckpt-2378490.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+variables/* filter=lfs diff=lfs merge=lfs -text
+assets/* filter=lfs diff=lfs merge=lfs -text
+saved_model.pb filter=lfs diff=lfs merge=lfs -text
+variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+*.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
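The added rules route the large binaries in this upload through Git LFS. A minimal sketch of how those patterns apply, using Python's fnmatch as a stand-in for git's attribute matching; only the patterns visible in this hunk are included (the default Hub rules in lines 1-32 are not shown), and the helper itself is an illustration, not git or Hugging Face tooling:

```python
# Hypothetical helper: approximate which paths the LFS rules added above match.
from fnmatch import fnmatch

LFS_PATTERNS = [
    "ckpt-2378490.data-00000-of-00001",
    "variables/*",
    "assets/*",
    "saved_model.pb",
    "variables/variables.data-00000-of-00001",
    "*.data-00000-of-00001",
]

def is_lfs_tracked(path: str) -> bool:
    # fnmatch is only an approximation of gitattributes matching
    # (git has extra rules for '**' and directory anchoring).
    return any(fnmatch(path, pattern) for pattern in LFS_PATTERNS)

for path in ["saved_model.pb", "ckpt-2378490.index",
             "variables/variables.data-00000-of-00001"]:
    print(path, is_lfs_tracked(path))
# saved_model.pb -> True; ckpt-2378490.index -> False (committed as a plain
# binary below); variables/variables.data-00000-of-00001 -> True
```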
assets/metadata.textproto
ADDED
@@ -0,0 +1,3 @@
xmanager_id: 59470521
xmanager_wid: 1
gin_config: "from __gin__ import dynamic_registration\nfrom gin import config\nfrom google3.robotics.learning.task_explore.tasks.fractal.data.rds.datasets import language_table\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.filters\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.fractal_rds_input_registry as google32\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.model_spec as google33\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.rds_input_builder as google34\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.transforms as google35\nimport google3.robotics.learning.task_explore.tasks.fractal.t2r_models.fractal_critic_model_builder as google36\nimport google3.robotics.learning.task_explore.tasks.fractal.t2r_models.moma_critic_model_builder as google37\nfrom google3.robotics.learning.task_explore.tasks.fractal.tf_agents import action_tokenizer\nimport google3.robotics.learning.task_explore.tasks.fractal.tf_agents.launch.gin_helper as google38\nfrom google3.robotics.learning.task_explore.tasks.fractal.tf_agents import observation_tokenizer\nimport google3.robotics.learning.task_explore.tasks.fractal.tf_agents.sample_clipper as google39\nfrom google3.robotics.learning.task_explore.tasks.mobile_grasping.t2r_models import grasping_44_critic_fat_model_builder\nimport google3.robotics.learning.task_explore.tf_agents.bc0.bc0_actor_net as google310\nfrom google3.robotics.learning.task_explore.tf_agents.bc0 import pretrained_efficientnet_encoder\nimport google3.robotics.learning.task_explore.tf_agents.episode_statistics_visualizations as google311\nimport google3.robotics.learning.task_explore.tf_agents.kuka_e2e_grasping.grasping_net as google312\nimport google3.robotics.learning.task_explore.tf_agents.kuka_e2e_grasping.workspace_clip_policy as google313\nfrom google3.robotics.learning.task_explore.tf_agents.policies import greedy_policy\nimport google3.robotics.learning.task_explore.tf_agents.preprocessors as google314\nfrom google3.robotics.learning.task_explore.tf_agents.sequence import sequence_agent\nfrom google3.robotics.learning.task_explore.tf_agents.sequence import transformer_network\nfrom google3.robotics.learning.task_explore.tf_agents import tf_agents_trainer\nimport google3.robotics.learning.task_explore.utils.gin_string_functions as google315\nfrom robotics_transformer.film_efficientnet import preprocessors\nfrom tensor2robot.preprocessors import image_transformations\nimport tensor2robot.utils.tensorspec_utils\nimport tensorflow as tf\nfrom tf_agents.google.xm import environment_specs\nfrom tf_agents.policies import actor_policy\nimport tf_agents.policies.samplers.qtopt_cem_actions_sampler_continuous_and_one_hot\nfrom tf_agents.train import learner\n\n# Macros:\n# ==============================================================================\nACTION_ORDER = \\\n [\'terminate_episode\',\n \'world_vector\',\n \'rotation_delta\',\n \'gripper_closedness_action\',\n \'base_displacement_vertical_rotation\',\n \'base_displacement_vector\']\nACTOR_NETWORK = @transformer_network.TransformerNetwork\nACTOR_OPTIMIZER = @actor_optimizer/tf.keras.optimizers.Adam()\nBASE_TRANSFORM_NAMES = \\\n [\'mk1_500tasks_te_real_without_filters\',\n \'jaco_play_without_filters\',\n \'bridge_without_filters\',\n \'berkeley_cable_routing_without_filters\',\n \'kuka_filters_positive\',\n \'language_table_kona_without_filters\',\n \'roboturk_without_filters\',\n \'viola_without_filters\',\n 
\'nyu_door_opening_surprising_effectiveness_without_filters\',\n \'berkeley_autolab_ur5_without_filters\',\n \'taco_play_without_filters\',\n \'toto_without_filters\']\nBATCH_SIZE = 8\nCROP_SIZE = 236\nDATASET_NAMES = \\\n [\'fractal.mk1_500tasks_te_real\',\n \'rlds.jaco_play\',\n \'rlds.bridge\',\n \'rlds.berkeley_cable_routing\',\n \'rlds.kuka\',\n \'language_table_augmented.language_table_kona\',\n \'rlds.roboturk\',\n \'rlds.viola\',\n \'rlds.nyu_door_opening_surprising_effectiveness\',\n \'rlds.berkeley_autolab_ur5\',\n \'rlds.taco_play\',\n \'rlds.toto\']\nDATASET_WEIGHTS = [150, 20, 50, 20, 20, 30, 10, 3, 5, 5, 5, 5]\nDEBUG_SUMMARIES = True\nLEARNING_RATE_ACTOR = 0.0001\nLOG_SUMMARY_INTERVAL = 280\nMAX_TRAINING_STEPS = 45000\nNUM_SHARDS_REVERB = 40\nPOLICY_CHECKPOINT_INTERVAL = 280\nREPLAY_BUFFER_BACKEND = None\nSEQUENCE_LENGTH = 15\nSPLITS = [\'train\']\nTF_AGENT_CLASS = @sequence_agent.SequenceAgent\nTRAIN_CHECKPOINT_INTERVAL = 280\nTRAIN_DIR = \\\n \'/cns/ib-d/home/quanhovuong-brain/quanhovuong/rs=6.3:sl=48M/ttl=12w:gc=0/xm_fractal_experiment_59470521/1/train\'\nTRAIN_LOG_DIR = \\\n \'/cns/ib-d/home/quanhovuong-brain/quanhovuong/rs=6.3:sl=48M/ttl=12w:gc=0/xm_fractal_experiment_59470521/1/train\'\nTRAIN_TABLE_NAME = None\nUSE_TCL = False\nVOCAB_SIZE = 512\nWORLD_VECTOR_LIMIT = 2.0\n\n# Parameters for google314._distort_proxy_images:\n# ==============================================================================\ngoogle314._distort_proxy_images.all_augs = False\ngoogle314._distort_proxy_images.use_cutout = False\n\n# Parameters for environment_specs.action_spec:\n# ==============================================================================\n# None.\n\n# Parameters for actor_policy.ActorPolicy:\n# ==============================================================================\nactor_policy.ActorPolicy.clip = True\nactor_policy.ActorPolicy.name = None\nactor_policy.ActorPolicy.observation_and_action_constraint_splitter = None\nactor_policy.ActorPolicy.observation_normalizer = None\nactor_policy.ActorPolicy.policy_state_spec = ()\n\n# Parameters for actor_optimizer/tf.keras.optimizers.Adam:\n# ==============================================================================\nactor_optimizer/tf.keras.optimizers.Adam.amsgrad = False\nactor_optimizer/tf.keras.optimizers.Adam.beta_1 = 0.9\nactor_optimizer/tf.keras.optimizers.Adam.beta_2 = 0.999\nactor_optimizer/tf.keras.optimizers.Adam.clipnorm = None\nactor_optimizer/tf.keras.optimizers.Adam.clipvalue = None\nactor_optimizer/tf.keras.optimizers.Adam.ema_momentum = 0.99\nactor_optimizer/tf.keras.optimizers.Adam.ema_overwrite_frequency = None\nactor_optimizer/tf.keras.optimizers.Adam.epsilon = 1e-07\nactor_optimizer/tf.keras.optimizers.Adam.global_clipnorm = None\nactor_optimizer/tf.keras.optimizers.Adam.jit_compile = True\nactor_optimizer/tf.keras.optimizers.Adam.learning_rate = %LEARNING_RATE_ACTOR\nactor_optimizer/tf.keras.optimizers.Adam.name = \'Adam\'\nactor_optimizer/tf.keras.optimizers.Adam.use_ema = False\nactor_optimizer/tf.keras.optimizers.Adam.weight_decay = None\n\n# Parameters for image_transformations.ApplyPhotometricImageDistortions:\n# ==============================================================================\nimage_transformations.ApplyPhotometricImageDistortions.lower_contrast = 0.5\nimage_transformations.ApplyPhotometricImageDistortions.lower_saturation = 0.5\nimage_transformations.ApplyPhotometricImageDistortions.max_delta_brightness = 0.125\nimage_transformations.ApplyPhotometricImageDistortions.max_delta_hue = 
0.2\nimage_transformations.ApplyPhotometricImageDistortions.random_noise_apply_probability = \\\n 0.5\nimage_transformations.ApplyPhotometricImageDistortions.upper_contrast = 1.5\nimage_transformations.ApplyPhotometricImageDistortions.upper_saturation = 1.5\n\n# Parameters for preprocessors.convert_dtype_and_crop_images:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.create_agent_and_specs:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.create_and_mix_rds_datasets:\n# ==============================================================================\ntf_agents_trainer.create_and_mix_rds_datasets.fewer_datasets_than_weights = False\ntf_agents_trainer.create_and_mix_rds_datasets.prefetch = True\ntf_agents_trainer.create_and_mix_rds_datasets.sample_from_datasets = True\n\n# Parameters for google33.create_model_spec:\n# ==============================================================================\n# None.\n\n# Parameters for google34.create_rds_episode_input_pipelines:\n# ==============================================================================\ngoogle34.create_rds_episode_input_pipelines.rds_dataset_names = %DATASET_NAMES\ngoogle34.create_rds_episode_input_pipelines.splits = %SPLITS\ngoogle34.create_rds_episode_input_pipelines.traj_transforms_names = \\\n @google32.infer_trajectory_transform_names()\n\n# Parameters for google34.create_rds_input_pipeline:\n# ==============================================================================\ngoogle34.create_rds_input_pipeline.episode_ds_pipeline_fn = None\ngoogle34.create_rds_input_pipeline.repeat = True\ngoogle34.create_rds_input_pipeline.split_slice = \'\'\ngoogle34.create_rds_input_pipeline.use_replicated = True\n\n# Parameters for google34.create_rds_input_pipeline_for_registered_trajectory_transform:\n# ==============================================================================\ngoogle34.create_rds_input_pipeline_for_registered_trajectory_transform.allow_read_cached = \\\n True\n\n# Parameters for google34.create_rds_input_pipelines_for_registered_trajectory_transforms:\n# ==============================================================================\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.episode_shuffle_buffer_size = \\\n 1\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.rds_dataset_names = \\\n %DATASET_NAMES\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.splits = \\\n %SPLITS\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.traj_shuffle_buffer_size = \\\n 3328\ngoogle34.create_rds_input_pipelines_for_registered_trajectory_transforms.traj_transforms_names = \\\n @google32.infer_trajectory_transform_names()\n\n# Parameters for google35.create_traj_transform_bc_transformer:\n# ==============================================================================\ngoogle35.create_traj_transform_bc_transformer.spec_name = \'FractalRLDSSpecLean\'\ngoogle35.create_traj_transform_bc_transformer.use_half_transition = True\ngoogle35.create_traj_transform_bc_transformer.wrapper = None\n\n# Parameters for google35.create_traj_transform_bc_transformer_with_filters:\n# ==============================================================================\ngoogle35.create_traj_transform_bc_transformer_with_filters.spec_name = \\\n 
\'FractalRLDSSpecLean\'\ngoogle35.create_traj_transform_bc_transformer_with_filters.step_filters = None\ngoogle35.create_traj_transform_bc_transformer_with_filters.use_half_transition = \\\n True\n\n# Parameters for pretrained_efficientnet_encoder.EfficientNetEncoder:\n# ==============================================================================\npretrained_efficientnet_encoder.EfficientNetEncoder.freeze = False\npretrained_efficientnet_encoder.EfficientNetEncoder.include_top = False\npretrained_efficientnet_encoder.EfficientNetEncoder.model_variant = \'b3\'\npretrained_efficientnet_encoder.EfficientNetEncoder.weights = \'imagenet\'\n\n# Parameters for add_split_to_dataset/google315.elementwise_string_join:\n# ==============================================================================\nadd_split_to_dataset/google315.elementwise_string_join.left = %DATASET_NAMES\nadd_split_to_dataset/google315.elementwise_string_join.right = \\\n @add_split_to_dataset/google34.infer_split()\nadd_split_to_dataset/google315.elementwise_string_join.separator = \'_\'\n\n# Parameters for add_traj_transform_to_dataset/google315.elementwise_string_join:\n# ==============================================================================\nadd_traj_transform_to_dataset/google315.elementwise_string_join.left = \\\n @add_split_to_dataset/google315.elementwise_string_join()\nadd_traj_transform_to_dataset/google315.elementwise_string_join.right = \\\n @google32.infer_trajectory_transform_names()\nadd_traj_transform_to_dataset/google315.elementwise_string_join.separator = \'_\'\n\n# Parameters for google35.episode_to_steps:\n# ==============================================================================\ngoogle35.episode_to_steps.discount_rate = 0.98\ngoogle35.episode_to_steps.experience_sequence_length = 6\ngoogle35.episode_to_steps.learn_terminate_action = False\ngoogle35.episode_to_steps.step_filters = None\ngoogle35.episode_to_steps.use_goal_image = False\n\n# Parameters for action_tokenizer.FractalActionTokenizer:\n# ==============================================================================\n# None.\n\n# Parameters for observation_tokenizer.FractalObservationTokenizer:\n# ==============================================================================\nobservation_tokenizer.FractalObservationTokenizer.image_length = None\nobservation_tokenizer.FractalObservationTokenizer.image_width = None\nobservation_tokenizer.FractalObservationTokenizer.num_context_tokens = 1\nobservation_tokenizer.FractalObservationTokenizer.num_token_per_image = 8\nobservation_tokenizer.FractalObservationTokenizer.prepend_context_to_image = False\n\n# Parameters for google33.get_action_spec:\n# ==============================================================================\ngoogle33.get_action_spec.world_vector_limit = %WORLD_VECTOR_LIMIT\n\n# Parameters for google33.get_additional_state_images:\n# ==============================================================================\n# None.\n\n# Parameters for google33.get_additional_state_robot_states:\n# ==============================================================================\n# None.\n\n# Parameters for google33.get_navigation_spec:\n# ==============================================================================\ngoogle33.get_navigation_spec.base_displacement_vector_limit = 1.0\ngoogle33.get_navigation_spec.base_displacement_vertical_rotation_limit = \\\n 3.141592653589793\ngoogle33.get_navigation_spec.include_terminate = False\ngoogle33.get_navigation_spec.return_action_order_only = 
False\n\n# Parameters for google33.get_spec:\n# ==============================================================================\n# None.\n\n# Parameters for greedy_policy.GreedyPolicy:\n# ==============================================================================\ngreedy_policy.GreedyPolicy.name = None\n\n# Parameters for google34.infer_split:\n# ==============================================================================\ngoogle34.infer_split.default_split = \'train\'\n\n# Parameters for add_split_to_dataset/google34.infer_split:\n# ==============================================================================\nadd_split_to_dataset/google34.infer_split.default_split = \'train\'\nadd_split_to_dataset/google34.infer_split.rds_dataset_names = %DATASET_NAMES\nadd_split_to_dataset/google34.infer_split.splits = %SPLITS\n\n# Parameters for google32.infer_trajectory_transform_names:\n# ==============================================================================\ngoogle32.infer_trajectory_transform_names.base_transform_names = \\\n %BASE_TRANSFORM_NAMES\ngoogle32.infer_trajectory_transform_names.experience_sequence_length = \\\n %SEQUENCE_LENGTH\n\n# Parameters for add_traj_transform_to_dataset/google32.infer_trajectory_transform_names:\n# ==============================================================================\nadd_traj_transform_to_dataset/google32.infer_trajectory_transform_names.base_transform_names = \\\n %BASE_TRANSFORM_NAMES\nadd_traj_transform_to_dataset/google32.infer_trajectory_transform_names.experience_sequence_length = \\\n %SEQUENCE_LENGTH\n\n# Parameters for learner.Learner:\n# ==============================================================================\nlearner.Learner.after_train_strategy_step_fn = None\nlearner.Learner.experience_dataset_options = None\nlearner.Learner.max_checkpoints_to_keep = 3\nlearner.Learner.strategy_run_options = None\nlearner.Learner.summary_root_dir = None\nlearner.Learner.triggers = None\nlearner.Learner.use_kwargs_in_agent_train = False\n\n# Parameters for language_table.map_action_real:\n# ==============================================================================\nlanguage_table.map_action_real.world_vector_limit = %WORLD_VECTOR_LIMIT\n\n# Parameters for google314.ProxyPreProcessor:\n# ==============================================================================\ngoogle314.ProxyPreProcessor.image_history_len = None\n\n# Parameters for google3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder:\n# ==============================================================================\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.filter_negative_rewards = \\\n False\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.filter_small_action_threshold = \\\n None\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.fractal_dataset_registry_legacy_dataset_name = \\\n None\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.fractal_project_name_pattern = \\\n None\ngoogle3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.RDSFilterBuilder.positive_filter = \\\n False\n\n# Parameters for sequence_agent.SequenceAgent:\n# ==============================================================================\nsequence_agent.SequenceAgent.action_spec = @environment_specs.action_spec()\nsequence_agent.SequenceAgent.actor_network = 
%ACTOR_NETWORK\nsequence_agent.SequenceAgent.actor_optimizer = %ACTOR_OPTIMIZER\nsequence_agent.SequenceAgent.debug_summaries = %DEBUG_SUMMARIES\nsequence_agent.SequenceAgent.time_sequence_length = %SEQUENCE_LENGTH\nsequence_agent.SequenceAgent.time_step_spec = @environment_specs.time_step_spec()\nsequence_agent.SequenceAgent.use_tcl = %USE_TCL\n\n# Parameters for google3.robotics.learning.task_explore.tasks.fractal.data.rds.filters.small_actions_step_filter:\n# ==============================================================================\n# None.\n\n# Parameters for add_split_to_dataset/google315.string_join:\n# ==============================================================================\n# None.\n\n# Parameters for add_traj_transform_to_dataset/google315.string_join:\n# ==============================================================================\n# None.\n\n# Parameters for google35.tfa_transition_builder:\n# ==============================================================================\n# None.\n\n# Parameters for environment_specs.time_step_spec:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.train:\n# ==============================================================================\ntf_agents_trainer.train.batch_size = %BATCH_SIZE\ntf_agents_trainer.train.data_spec_create_fn = @google33.create_model_spec\ntf_agents_trainer.train.data_spec_path = \\\n \'/cns/ib-d/home/quanhovuong-brain/quanhovuong/rs=6.3:sl=48M/ttl=12w:gc=0/xm_fractal_experiment_59470521/1/train/collect_policy/environment_specs.textproto\'\ntf_agents_trainer.train.enable_xla = False\ntf_agents_trainer.train.experience_sequence_length = %SEQUENCE_LENGTH\ntf_agents_trainer.train.flywheel_tick_counter_ctor = None\ntf_agents_trainer.train.in_graph_bellman_update = False\ntf_agents_trainer.train.log_interval = %LOG_SUMMARY_INTERVAL\ntf_agents_trainer.train.num_shards = %NUM_SHARDS_REVERB\ntf_agents_trainer.train.number_training_steps = %MAX_TRAINING_STEPS\ntf_agents_trainer.train.policy_checkpoint_interval = %POLICY_CHECKPOINT_INTERVAL\ntf_agents_trainer.train.pull_buffer_names = \\\n @add_traj_transform_to_dataset/google315.elementwise_string_join()\ntf_agents_trainer.train.pull_buffer_weights = %DATASET_WEIGHTS\ntf_agents_trainer.train.rds_datasets_create_fn = \\\n @google34.create_rds_input_pipelines_for_registered_trajectory_transforms()\ntf_agents_trainer.train.rds_episode_datasets_create_fn = \\\n @google34.create_rds_episode_input_pipelines()\ntf_agents_trainer.train.replay_buffer_backend = %REPLAY_BUFFER_BACKEND\ntf_agents_trainer.train.replay_buffer_name = %TRAIN_TABLE_NAME\ntf_agents_trainer.train.saved_model_policy_wrapper = None\ntf_agents_trainer.train.summary_episode_replay_buffer_names = \\\n @add_traj_transform_to_dataset/google315.elementwise_string_join()\ntf_agents_trainer.train.summary_interval = %LOG_SUMMARY_INTERVAL\ntf_agents_trainer.train.tf_agent_class = %TF_AGENT_CLASS\ntf_agents_trainer.train.train_checkpoint_interval = %TRAIN_CHECKPOINT_INTERVAL\ntf_agents_trainer.train.train_dir = %TRAIN_DIR\ntf_agents_trainer.train.train_log_dir = %TRAIN_LOG_DIR\ntf_agents_trainer.train.train_summary = False\ntf_agents_trainer.train.train_summary_episode = False\ntf_agents_trainer.train.train_summary_load_latest_ckpt = True\ntf_agents_trainer.train.warm_start_dir = None\n\n# Parameters for transformer_network.TransformerNetwork:\n# 
==============================================================================\ntransformer_network.TransformerNetwork.action_order = %ACTION_ORDER\ntransformer_network.TransformerNetwork.action_scales = [%VOCAB_SIZE]\ntransformer_network.TransformerNetwork.continuous_robot_state_features = ()\ntransformer_network.TransformerNetwork.crop_size = %CROP_SIZE\ntransformer_network.TransformerNetwork.dropout_rate = 0.1\ntransformer_network.TransformerNetwork.feed_forward_size = 512\ntransformer_network.TransformerNetwork.image_patch_size = 16\ntransformer_network.TransformerNetwork.image_position_embedding = \\\n %google3.robotics.learning.task_explore.tasks.fractal.tf_agents.observation_tokenizer.PositionEmbeddingType.NONE\ntransformer_network.TransformerNetwork.image_token_encoder = \\\n %google3.robotics.learning.task_explore.tasks.fractal.tf_agents.observation_tokenizer.EncoderType.FILM_PRETRAINED_EFFICIENT_NET\ntransformer_network.TransformerNetwork.images_to_use = (\'image\',)\ntransformer_network.TransformerNetwork.include_prev_timesteps_actions = False\ntransformer_network.TransformerNetwork.include_same_timestep_prev_action_dimensions = \\\n False\ntransformer_network.TransformerNetwork.inference_time_return_discount_factor = 0.98\ntransformer_network.TransformerNetwork.layer_size = 256\ntransformer_network.TransformerNetwork.num_heads = 8\ntransformer_network.TransformerNetwork.num_layers = 8\ntransformer_network.TransformerNetwork.output_tokens_per_frame = None\ntransformer_network.TransformerNetwork.return_attention_scores = False\ntransformer_network.TransformerNetwork.return_optimality_weight = 0.1\ntransformer_network.TransformerNetwork.return_top_percentile = 85\ntransformer_network.TransformerNetwork.return_vocab_size = 128\ntransformer_network.TransformerNetwork.stack_images = False\ntransformer_network.TransformerNetwork.state_features = None\ntransformer_network.TransformerNetwork.tcl_weight = 0.05\ntransformer_network.TransformerNetwork.token_embedding_size = 512\ntransformer_network.TransformerNetwork.use_token_learner = False\ntransformer_network.TransformerNetwork.vocab_size = %VOCAB_SIZE\n"
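The gin_config string above records the full training configuration, but it imports google3-internal modules, so it cannot be re-parsed with gin.parse_config outside Google. A sketch for pulling just the top-level macros (BATCH_SIZE, SEQUENCE_LENGTH, ...) out of the textproto; the regexes are assumptions about the escaping, and multi-line macros that use line continuations (ACTION_ORDER, DATASET_NAMES, ...) are skipped by this simple pass:

```python
# Sketch only: extract top-level gin macros from the gin_config field of
# assets/metadata.textproto.
import re

def read_gin_macros(textproto_path: str) -> dict:
    text = open(textproto_path).read()
    # The gin_config field is one quoted string with escaped newlines.
    match = re.search(r'gin_config: "(.*)"', text, re.S)
    config = match.group(1).encode().decode("unicode_escape")
    macros = {}
    for line in config.splitlines():
        m = re.match(r"([A-Z][A-Z0-9_]*) = (\S.*)", line)
        if m:
            macros[m.group(1)] = m.group(2)
    return macros

macros = read_gin_macros("assets/metadata.textproto")
print(macros["BATCH_SIZE"])       # '8'
print(macros["SEQUENCE_LENGTH"])  # '15'
print(macros["VOCAB_SIZE"])       # '512'
```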
ckpt-2378490.data-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7fd0fc96dee879acfcd0e51e64fe3e37618dd3cbe36392ee808965bad8db4897
size 635858175
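These three key/value lines are the entire on-disk content of an LFS-tracked file; the 635858175-byte checkpoint shard itself lives in LFS storage. A sketch of reading such a pointer once it is checked out locally (a hypothetical helper, not git-lfs itself; the same format applies to fingerprint.pb, saved_model.pb, and variables/variables.data-00000-of-00001 below):

```python
# Parse a Git LFS pointer file into its version/oid/size fields.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("ckpt-2378490.data-00000-of-00001")
assert ptr["version"] == "https://git-lfs.github.com/spec/v1"
print(ptr["oid"])        # sha256:7fd0fc96...
print(int(ptr["size"]))  # 635858175
```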
ckpt-2378490.index
ADDED
Binary file (34 kB).
fingerprint.pb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9577ac113da254576b3e2124429da5a5124d5931a99a69960fbfd91503a55d2e
size 56
policy_specs.pbtxt
ADDED
@@ -0,0 +1,422 @@
dict_value {
  fields {
    key: "collect_data_spec"
    value {
      named_tuple_value {
        name: "Trajectory"
        values {
          key: "step_type"
          value {
            tensor_spec_value {
              name: "step_type"
              shape {
              }
              dtype: DT_INT32
            }
          }
        }
        values {
          key: "observation"
          value {
            dict_value {
              fields {
                key: "image"
                value {
                  bounded_tensor_spec_value {
                    name: "image"
                    shape {
                      dim {
                        size: 256
                      }
                      dim {
                        size: 320
                      }
                      dim {
                        size: 3
                      }
                    }
                    dtype: DT_UINT8
                    minimum {
                      dtype: DT_UINT8
                      tensor_shape {
                      }
                      int_val: 0
                    }
                    maximum {
                      dtype: DT_UINT8
                      tensor_shape {
                      }
                      int_val: 1
                    }
                  }
                }
              }
              fields {
                key: "natural_language_embedding"
                value {
                  tensor_spec_value {
                    name: "natural_language_embedding"
                    shape {
                      dim {
                        size: 512
                      }
                    }
                    dtype: DT_FLOAT
                  }
                }
              }
              fields {
                key: "natural_language_instruction"
                value {
                  tensor_spec_value {
                    name: "natural_language_instruction"
                    shape {
                    }
                    dtype: DT_STRING
                  }
                }
              }
            }
          }
        }
        values {
          key: "action"
          value {
            dict_value {
              fields {
                key: "base_displacement_vector"
                value {
                  bounded_tensor_spec_value {
                    name: "base_displacement_vector"
                    shape {
                      dim {
                        size: 2
                      }
                    }
                    dtype: DT_FLOAT
                    minimum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: -1.0
                    }
                    maximum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 1.0
                    }
                  }
                }
              }
              fields {
                key: "base_displacement_vertical_rotation"
                value {
                  bounded_tensor_spec_value {
                    name: "base_displacement_vertical_rotation"
                    shape {
                      dim {
                        size: 1
                      }
                    }
                    dtype: DT_FLOAT
                    minimum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: -3.1415927
                    }
                    maximum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 3.1415927
                    }
                  }
                }
              }
              fields {
                key: "gripper_closedness_action"
                value {
                  bounded_tensor_spec_value {
                    name: "gripper_closedness_action"
                    shape {
                      dim {
                        size: 1
                      }
                    }
                    dtype: DT_FLOAT
                    minimum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: -1.0
                    }
                    maximum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 1.0
                    }
                  }
                }
              }
              fields {
                key: "rotation_delta"
                value {
                  bounded_tensor_spec_value {
                    name: "rotation_delta"
                    shape {
                      dim {
                        size: 3
                      }
                    }
                    dtype: DT_FLOAT
                    minimum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: -1.5707964
                    }
                    maximum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 1.5707964
                    }
                  }
                }
              }
              fields {
                key: "terminate_episode"
                value {
                  bounded_tensor_spec_value {
                    name: "terminate_episode"
                    shape {
                      dim {
                        size: 3
                      }
                    }
                    dtype: DT_INT32
                    minimum {
                      dtype: DT_INT32
                      tensor_shape {
                      }
                      int_val: 0
                    }
                    maximum {
                      dtype: DT_INT32
                      tensor_shape {
                      }
                      int_val: 1
                    }
                  }
                }
              }
              fields {
                key: "world_vector"
                value {
                  bounded_tensor_spec_value {
                    name: "world_vector"
                    shape {
                      dim {
                        size: 3
                      }
                    }
                    dtype: DT_FLOAT
                    minimum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: -2.0
                    }
                    maximum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 2.0
                    }
                  }
                }
              }
            }
          }
        }
        values {
          key: "policy_info"
          value {
            dict_value {
              fields {
                key: "discounted_return"
                value {
                  bounded_tensor_spec_value {
                    name: "discounted_return"
                    shape {
                    }
                    dtype: DT_FLOAT
                    minimum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 0.0
                    }
                    maximum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 1.0
                    }
                  }
                }
              }
              fields {
                key: "return"
                value {
                  bounded_tensor_spec_value {
                    name: "return"
                    shape {
                    }
                    dtype: DT_FLOAT
                    minimum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 0.0
                    }
                    maximum {
                      dtype: DT_FLOAT
                      tensor_shape {
                      }
                      float_val: 1.0
                    }
                  }
                }
              }
            }
          }
        }
        values {
          key: "next_step_type"
          value {
            tensor_spec_value {
              name: "step_type"
              shape {
              }
              dtype: DT_INT32
            }
          }
        }
        values {
          key: "reward"
          value {
            tensor_spec_value {
              name: "reward"
              shape {
              }
              dtype: DT_FLOAT
            }
          }
        }
        values {
          key: "discount"
          value {
            bounded_tensor_spec_value {
              name: "discount"
              shape {
              }
              dtype: DT_FLOAT
              minimum {
                dtype: DT_FLOAT
                tensor_shape {
                }
                float_val: 0.0
              }
              maximum {
                dtype: DT_FLOAT
                tensor_shape {
                }
                float_val: 1.0
              }
            }
          }
        }
      }
    }
  }
  fields {
    key: "policy_state_spec"
    value {
      dict_value {
        fields {
          key: "action_tokens"
          value {
            tensor_spec_value {
              name: "action_tokens"
              shape {
                dim {
                  size: 15
                }
                dim {
                  size: 11
                }
                dim {
                  size: 1
                }
                dim {
                  size: 1
                }
              }
              dtype: DT_INT32
            }
          }
        }
        fields {
          key: "context_image_tokens"
          value {
            tensor_spec_value {
              name: "context_image_tokens"
              shape {
                dim {
                  size: 15
                }
                dim {
                  size: 81
                }
                dim {
                  size: 1
                }
                dim {
                  size: 512
                }
              }
              dtype: DT_FLOAT
            }
          }
        }
        fields {
          key: "seq_idx"
          value {
            tensor_spec_value {
              name: "seq_idx"
              shape {
                dim {
                  size: 1
                }
                dim {
                  size: 1
                }
                dim {
                  size: 1
                }
                dim {
                  size: 1
                }
              }
              dtype: DT_INT32
            }
          }
        }
      }
    }
  }
}
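policy_specs.pbtxt encodes the policy's Trajectory signature as nested tensor specs. A sketch of the same structure written as in-memory TF-Agents specs, with shapes and bounds copied from the pbtxt above; the code is illustrative and not shipped with the model, and assumes tensorflow and tf-agents are installed:

```python
# Rebuild the observation and action specs from collect_data_spec above.
import tensorflow as tf
from tf_agents.specs import tensor_spec

observation_spec = {
    "image": tensor_spec.BoundedTensorSpec(
        (256, 320, 3), tf.uint8, minimum=0, maximum=1, name="image"),
    "natural_language_embedding": tf.TensorSpec(
        (512,), tf.float32, name="natural_language_embedding"),
    "natural_language_instruction": tf.TensorSpec(
        (), tf.string, name="natural_language_instruction"),
}

action_spec = {
    "world_vector": tensor_spec.BoundedTensorSpec(
        (3,), tf.float32, minimum=-2.0, maximum=2.0, name="world_vector"),
    "rotation_delta": tensor_spec.BoundedTensorSpec(
        (3,), tf.float32, minimum=-1.5707964, maximum=1.5707964,
        name="rotation_delta"),
    "gripper_closedness_action": tensor_spec.BoundedTensorSpec(
        (1,), tf.float32, minimum=-1.0, maximum=1.0,
        name="gripper_closedness_action"),
    "terminate_episode": tensor_spec.BoundedTensorSpec(
        (3,), tf.int32, minimum=0, maximum=1, name="terminate_episode"),
    "base_displacement_vector": tensor_spec.BoundedTensorSpec(
        (2,), tf.float32, minimum=-1.0, maximum=1.0,
        name="base_displacement_vector"),
    "base_displacement_vertical_rotation": tensor_spec.BoundedTensorSpec(
        (1,), tf.float32, minimum=-3.1415927, maximum=3.1415927,
        name="base_displacement_vertical_rotation"),
}
```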
saved_model.pb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee418cab500da16a982fdddcc83a49357e5a48d5c44af1c30df8f52653f573f0
size 21890516
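With saved_model.pb, variables/, and policy_specs.pbtxt in place, the upload is a complete TF-Agents SavedModel policy. A loading sketch: SavedModelPyTFEagerPolicy and its load_specs_from_pbtxt flag are standard tf-agents APIs, while the local path "." is an assumption about where the repo is checked out with its LFS files pulled:

```python
# Load the exported policy and read its specs from policy_specs.pbtxt.
from tf_agents.policies import py_tf_eager_policy

policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
    model_path=".",              # directory containing saved_model.pb
    load_specs_from_pbtxt=True,  # reads the policy_specs.pbtxt added above
)
policy_state = policy.get_initial_state(batch_size=1)
print(policy.time_step_spec)
```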
variables/variables.data-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2bbce09cb3d43ddef4dc7ae56f03755a777fa4e19350f6b6917df5157c0cc99d
size 212549757
variables/variables.index
ADDED
Binary file (13.3 kB).
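The ckpt-2378490.* pair at the top of this commit is a regular TensorFlow training checkpoint and can be inspected without building the network. A sketch, assuming the index and data shard sit in the current directory with their LFS content pulled:

```python
# List variable names and shapes stored in the training checkpoint.
import tensorflow as tf

reader = tf.train.load_checkpoint("ckpt-2378490")
shapes = reader.get_variable_to_shape_map()
for name in sorted(shapes)[:10]:  # first few variables only
    print(name, shapes[name])
```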