enemy7 committed on
Commit fa4df14
1 Parent(s): 74a99d7

Update main.py

Files changed (1)
  1. main.py +5 -6
main.py CHANGED
@@ -73,24 +73,23 @@ annotation_folder = '/annotations/'
 if not os.path.exists(os.path.abspath('.') + annotation_folder):
   annotation_zip = tf.keras.utils.get_file('captions.zip',
                                            cache_subdir=os.path.abspath('.'),
-                                           origin='http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
+                                           origin='http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
                                            extract=True)
-  annotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2017.json'
+  annotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'
   os.remove(annotation_zip)
 
 # Download image files
-image_folder = '/train2017/'
+image_folder = '/train2014/'
 if not os.path.exists(os.path.abspath('.') + image_folder):
-  image_zip = tf.keras.utils.get_file('train2017.zip',
+  image_zip = tf.keras.utils.get_file('train2014.zip',
                                       cache_subdir=os.path.abspath('.'),
-                                      origin='http://images.cocodataset.org/zips/train2017.zip',
+                                      origin='http://images.cocodataset.org/zips/train2014.zip',
                                       extract=True)
   PATH = os.path.dirname(image_zip) + image_folder
   os.remove(image_zip)
 else:
   PATH = os.path.abspath('.') + image_folder
 
-PATH
 
 """## Optional: limit the size of the training set
 To speed up training for this tutorial, you'll use a subset of 30,000 captions and their corresponding images to train your model. Choosing to use more data would result in improved captioning quality.
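For context, the "limit the size of the training set" step mentioned above is not part of this commit's diff. The sketch below shows what that subsetting might look like against the captions_train2014.json file and train2014/ folder downloaded by the code in this change; it assumes the annotation_file and PATH variables from the diff, and the 6,000-image cut-off and the train_captions / img_name_vector names follow the TensorFlow image-captioning tutorial this script resembles rather than anything shown in this commit.

# Sketch only: assumes annotation_file and PATH are defined by the download code above.
import json
import random

with open(annotation_file, 'r') as f:
    annotations = json.load(f)

# Group captions by image, using the COCO 2014 file-naming convention.
image_path_to_caption = {}
for ann in annotations['annotations']:
    caption = '<start> ' + ann['caption'] + ' <end>'
    image_path = PATH + 'COCO_train2014_{:012d}.jpg'.format(ann['image_id'])
    image_path_to_caption.setdefault(image_path, []).append(caption)

image_paths = list(image_path_to_caption.keys())
random.shuffle(image_paths)

# About 6,000 images yields roughly 30,000 captions (around 5 captions per image).
train_image_paths = image_paths[:6000]

train_captions = []
img_name_vector = []
for image_path in train_image_paths:
    captions = image_path_to_caption[image_path]
    train_captions.extend(captions)
    img_name_vector.extend([image_path] * len(captions))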