<!DOCTYPE html>
<!-- saved from url=(0044)https://www.tinymind.com/executions/zremsqku -->
<html lang="zh-CN"><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
    
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <meta name="keywords" content="ai,ml,tinymind,machine learning,deep learning,artificial intelligence,tensorflow,neural network">
    <meta name="url" content="https://www.tinymind.com">
    <meta name="apple-mobile-web-app-capable" content="yes">
    <meta name="apple-mobile-web-app-status-bar-style" content="default">
    <meta name="theme-color" content="#2991ff">
    <meta property="og:url" content="https://www.tinymind.com">
    <meta property="og:type" content="website">
    <meta property="og:site_name" content="TinyMind">
<meta property="og:title" content="运行 - TinyMind">
<meta property="og:image" content="https://danqing.github.io/tms/ogp/exec.jpg">
    <link rel="apple-touch-icon" href="https://danqing.github.io/tms/logo/logo144.png">
    <link rel="icon" type="image/png" sizes="32x32" href="https://danqing.github.io/tms/logo/logo32.png">
    <link rel="stylesheet" href="./w8_densenet_files/main-f549792a89.css">
    <script async src="./w8_densenet_files/analytics.js.下载"></script><script src="./w8_densenet_files/icons-7f67bb92d6.js.下载"></script>
    <script src="./w8_densenet_files/fa-b779e2e670.js.下载"></script>
    <script src="./w8_densenet_files/react-fb466d65d1.js.下载"></script>
    <script src="./w8_densenet_files/raven.min.js.下载" crossorigin="anonymous"></script>
    <!-- Google Analytics bootstrap: creates the ga() command queue on window,
         then injects the analytics.js loader asynchronously before the first
         existing <script> element. -->
    <script>(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-76540503-9', 'auto'); // tracker for property UA-76540503-9, automatic cookie domain
ga('send', 'pageview'); // record the initial pageview
    </script>
    <!-- Sentry (Raven.js) error reporting: registers global error handlers.
         Relies on the raven.min.js script loaded above; the DSN is the public
         client key (intended to be exposed in page source). -->
    <script>Raven.config('https://a64a7462e2a847a2a0819a529284ddf8@sentry.io/1226794').install()
    </script>

<link rel="stylesheet" href="./w8_densenet_files/resource-fbb7ff89f3.css">
<link rel="stylesheet" href="./w8_densenet_files/profile-c24522516a.css">
<title>运行 - TinyMind</title>
  <style type="text/css">.fa-fw,.fa-layers,.fa-li{text-align:center}.fa-layers,.fa-stack,.svg-inline--fa{display:inline-block}.svg-inline--fa{font-size:inherit;height:1em;overflow:visible;vertical-align:-12.5%}.svg-inline--fa.fa-lg{vertical-align:-25%}.svg-inline--fa.fa-w-1{width:.0625em}.svg-inline--fa.fa-w-2{width:.125em}.svg-inline--fa.fa-w-3{width:.1875em}.svg-inline--fa.fa-w-4{width:.25em}.svg-inline--fa.fa-w-5{width:.3125em}.svg-inline--fa.fa-w-6{width:.375em}.svg-inline--fa.fa-w-7{width:.4375em}.svg-inline--fa.fa-w-8{width:.5em}.svg-inline--fa.fa-w-9{width:.5625em}.svg-inline--fa.fa-w-10{width:.625em}.svg-inline--fa.fa-w-11{width:.6875em}.svg-inline--fa.fa-w-12{width:.75em}.svg-inline--fa.fa-w-13{width:.8125em}.svg-inline--fa.fa-w-14{width:.875em}.svg-inline--fa.fa-w-15{width:.9375em}.svg-inline--fa.fa-w-16{width:1em}.svg-inline--fa.fa-w-17{width:1.0625em}.svg-inline--fa.fa-w-18{width:1.125em}.svg-inline--fa.fa-w-19{width:1.1875em}.svg-inline--fa.fa-w-20{width:1.25em}.svg-inline--fa.fa-pull-left{margin-right:.3em;width:auto}.svg-inline--fa.fa-pull-right{margin-left:.3em;width:auto}.svg-inline--fa.fa-border{height:1.5em}.svg-inline--fa.fa-li{top:auto;width:1.875em}.svg-inline--fa.fa-fw{width:1.25em}.fa-layers svg.svg-inline--fa{bottom:0;left:0;margin:auto;position:absolute;right:0;top:0;-webkit-transform-origin:center center;transform-origin:center center}.fa-layers{height:1em;position:relative;vertical-align:-12.5%;width:1em}.fa-layers-counter,.fa-layers-text{display:inline-block;position:absolute;text-align:center}.fa-layers-text{left:50%;top:50%;-webkit-transform:translate(-50%,-50%);transform:translate(-50%,-50%);-webkit-transform-origin:center center;transform-origin:center center}.fa-layers-counter{background-color:#ff253a;border-radius:1em;color:#fff;height:1.5em;line-height:1;max-width:5em;min-width:1.5em;overflow:hidden;padding:.25em;right:0;text-overflow:ellipsis;top:0;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:top 
right;transform-origin:top right}.fa-layers-bottom-right{bottom:0;right:0;top:auto;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:bottom right;transform-origin:bottom right}.fa-layers-bottom-left{bottom:0;left:0;right:auto;top:auto;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:bottom left;transform-origin:bottom left}.fa-layers-top-right{right:0;top:0;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:top right;transform-origin:top right}.fa-layers-top-left{left:0;right:auto;top:0;-webkit-transform:scale(.25);transform:scale(.25);-webkit-transform-origin:top left;transform-origin:top left}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-xs{font-size:.75em}.fa-sm{font-size:.875em}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-fw{width:1.25em}.fa-ul{list-style-type:none;margin-left:1.875em;padding-left:0}.fa-ul>li{position:relative}.fa-li{left:-1.875em;position:absolute;top:.14286em;width:1.875em}.fa-li.fa-lg{left:-1.625em}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1,1);transform:scale(-1,1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1,-1);transform:scale(1,-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-rotate-90{-webkit-filter:none;filter:none}.fa-stack{height:2em;position:relative;width:2em}.fa-stack-1x,.fa-stack-2x{bottom:0;left:0;margin:auto;position:absolute;right:0;top:0}.svg-inline--fa.fa-stack-1x{height:1em;width:1em}.svg-inline--fa.fa-stack-2x{height:2em;width:2em}.fa-inverse{color:#fff}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}</style></head>
  <body>
    <div id="page-wrapper">
      <div id="page-header">
<nav id="top-nav" class=" loggedin">
  <div id="mobile-bg"></div>
  <div class="wrapper"><a href="https://www.tinymind.com/" id="top-nav-logo">
      <svg viewBox="0 0 32 32" width="32px" height="32px">
        <path fill="currentColor" d="M15.9903258,25.473124 C17.5442842,25.8379319 19.2952287,25.9414015 20.4117212,25.9063682 C22.7957212,25.831563 23.9161368,24.0769656 24.7502147,21.8423682 C25.7473103,19.1699033 24.0150367,17.6240943 20.8700531,17.9022629 C21.5576564,16.6416323 22.0814147,15.436817 22.3441102,14.5552276 C23.2910193,11.3774613 22.355331,9.70743535 19.7836946,7.61392886 C16.9585518,5.31387691 13.4524739,4.75533146 11.5310193,11.203747 C11.1575851,12.4570503 10.9498439,14.5901716 11.0301114,16.7723149 C10.7027811,15.0503749 10.5532914,13.4253749 10.5731206,12.3629326 C10.6055362,10.6133222 10.8694323,9.36324432 11.2723414,8.37768588 C11.2619518,8.38807549 11.1524453,8.34610146 10.9286531,8.3317638 C7.59712061,8.11732224 4.68325048,9.56459497 6.67826346,15.3989846 C7.27971371,17.1584286 9.30498631,20.2852986 11.4451685,22.2223594 L6.89027663,18.9646326 C6.75541949,18.868217 6.61744546,18.9033339 6.65588702,19.0236456 C6.90565326,19.8047365 8.75957533,22.1997495 11.6385364,24.2589702 C14.5177052,26.318191 17.4623286,27.3552819 18.2824844,27.3392819 C18.4084065,27.3365806 18.3971857,27.1946585 18.2625364,27.098243 L15.9903258,25.473124 Z M32,16 C32,24.8365714 24.8363636,32 16,32 C7.16342857,32 0,24.8365714 0,16 C0,7.16342857 7.16342857,0 16,0 C24.8363636,0 32,7.16342857 32,16 Z"></path>
      </svg><span>TinyMind</span></a>
    <div id="top-nav-hamburger"><span></span><span></span><span></span>
    </div>
    <div class="nav-entries">
      <ul>
        <li><a href="https://www.tinymind.com/models"><span>模型</span></a>
        </li>
        <li><a href="https://www.tinymind.com/datasets"><span>数据集</span></a>
        </li>
        <li><a href="https://www.tinymind.com/notebooks"><span>Notebooks</span></a>
        </li>
        <li><a href="https://www.tinymind.cn/"><span>TinyMind.cn</span></a>
        </li>
        <li id="search-wrapper" class="search">
          <div class="a"><span>搜索</span>
          </div><svg class="svg-inline--fa fa-search fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="search" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M508.5 468.9L387.1 347.5c-2.3-2.3-5.3-3.5-8.5-3.5h-13.2c31.5-36.5 50.6-84 50.6-136C416 93.1 322.9 0 208 0S0 93.1 0 208s93.1 208 208 208c52 0 99.5-19.1 136-50.6v13.2c0 3.2 1.3 6.2 3.5 8.5l121.4 121.4c4.7 4.7 12.3 4.7 17 0l22.6-22.6c4.7-4.7 4.7-12.3 0-17zM208 368c-88.4 0-160-71.6-160-160S119.6 48 208 48s160 71.6 160 160-71.6 160-160 160z"></path></svg><!-- <i class="fa fa-fw fa-search"></i> -->
        </li>
      </ul>
    </div>
    <div class="nav-entries">
      <ul>
        <li><a id="nav-username" href="https://www.tinymind.com/evolution23"><span>evolution23</span></a>
        </li>
        <li class="narrow"><a href="https://www.tinymind.com/settings" title="设置"><span class="mobile-only">设置</span><svg class="svg-inline--fa fa-cog fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="cog" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M452.515 237l31.843-18.382c9.426-5.441 13.996-16.542 11.177-27.054-11.404-42.531-33.842-80.547-64.058-110.797-7.68-7.688-19.575-9.246-28.985-3.811l-31.785 18.358a196.276 196.276 0 0 0-32.899-19.02V39.541a24.016 24.016 0 0 0-17.842-23.206c-41.761-11.107-86.117-11.121-127.93-.001-10.519 2.798-17.844 12.321-17.844 23.206v36.753a196.276 196.276 0 0 0-32.899 19.02l-31.785-18.358c-9.41-5.435-21.305-3.877-28.985 3.811-30.216 30.25-52.654 68.265-64.058 110.797-2.819 10.512 1.751 21.613 11.177 27.054L59.485 237a197.715 197.715 0 0 0 0 37.999l-31.843 18.382c-9.426 5.441-13.996 16.542-11.177 27.054 11.404 42.531 33.842 80.547 64.058 110.797 7.68 7.688 19.575 9.246 28.985 3.811l31.785-18.358a196.202 196.202 0 0 0 32.899 19.019v36.753a24.016 24.016 0 0 0 17.842 23.206c41.761 11.107 86.117 11.122 127.93.001 10.519-2.798 17.844-12.321 17.844-23.206v-36.753a196.34 196.34 0 0 0 32.899-19.019l31.785 18.358c9.41 5.435 21.305 3.877 28.985-3.811 30.216-30.25 52.654-68.266 64.058-110.797 2.819-10.512-1.751-21.613-11.177-27.054L452.515 275c1.22-12.65 1.22-25.35 0-38zm-52.679 63.019l43.819 25.289a200.138 200.138 0 0 1-33.849 58.528l-43.829-25.309c-31.984 27.397-36.659 30.077-76.168 44.029v50.599a200.917 200.917 0 0 1-67.618 0v-50.599c-39.504-13.95-44.196-16.642-76.168-44.029l-43.829 25.309a200.15 200.15 0 0 1-33.849-58.528l43.819-25.289c-7.63-41.299-7.634-46.719 0-88.038l-43.819-25.289c7.85-21.229 19.31-41.049 33.849-58.529l43.829 25.309c31.984-27.397 36.66-30.078 76.168-44.029V58.845a200.917 200.917 0 0 1 67.618 0v50.599c39.504 13.95 44.196 16.642 76.168 44.029l43.829-25.309a200.143 200.143 0 0 1 33.849 58.529l-43.819 25.289c7.631 41.3 7.634 46.718 0 88.037zM256 160c-52.935 0-96 
43.065-96 96s43.065 96 96 96 96-43.065 96-96-43.065-96-96-96zm0 144c-26.468 0-48-21.532-48-48 0-26.467 21.532-48 48-48s48 21.533 48 48c0 26.468-21.532 48-48 48z"></path></svg><!-- <i class="fa fa-fw fa-cog"></i> --></a>
        </li>
        <li class="narrow"><a href="https://www.tinymind.com/docs" title="文档"><span class="mobile-only">文档</span><svg class="svg-inline--fa fa-book fa-w-14 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="book" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path fill="currentColor" d="M128 152v-32c0-4.4 3.6-8 8-8h208c4.4 0 8 3.6 8 8v32c0 4.4-3.6 8-8 8H136c-4.4 0-8-3.6-8-8zm8 88h208c4.4 0 8-3.6 8-8v-32c0-4.4-3.6-8-8-8H136c-4.4 0-8 3.6-8 8v32c0 4.4 3.6 8 8 8zm299.1 159.7c-4.2 13-4.2 51.6 0 64.6 7.3 1.4 12.9 7.9 12.9 15.7v16c0 8.8-7.2 16-16 16H80c-44.2 0-80-35.8-80-80V80C0 35.8 35.8 0 80 0h352c8.8 0 16 7.2 16 16v368c0 7.8-5.5 14.2-12.9 15.7zm-41.1.3H80c-17.6 0-32 14.4-32 32 0 17.7 14.3 32 32 32h314c-2.7-17.3-2.7-46.7 0-64zm6-352H80c-17.7 0-32 14.3-32 32v278.7c9.8-4.3 20.6-6.7 32-6.7h320V48z"></path></svg><!-- <i class="fa fa-fw fa-book"></i> --></a>
        </li>
        <li class="narrow"><a href="https://www.tinymind.com/logout" title="登出"><span class="mobile-only">登出</span><svg class="svg-inline--fa fa-sign-out fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="sign-out" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M96 64h84c6.6 0 12 5.4 12 12v24c0 6.6-5.4 12-12 12H96c-26.5 0-48 21.5-48 48v192c0 26.5 21.5 48 48 48h84c6.6 0 12 5.4 12 12v24c0 6.6-5.4 12-12 12H96c-53 0-96-43-96-96V160c0-53 43-96 96-96zm231.1 19.5l-19.6 19.6c-4.8 4.8-4.7 12.5.2 17.1L420.8 230H172c-6.6 0-12 5.4-12 12v28c0 6.6 5.4 12 12 12h248.8L307.7 391.7c-4.8 4.7-4.9 12.4-.2 17.1l19.6 19.6c4.7 4.7 12.3 4.7 17 0l164.4-164c4.7-4.7 4.7-12.3 0-17l-164.4-164c-4.7-4.6-12.3-4.6-17 .1z"></path></svg><!-- <i class="fa fa-fw fa-sign-out"></i> --></a>
        </li>
      </ul>
    </div>
  </div>
  <div class="searchbar">
    <div class="wrapper"><svg class="svg-inline--fa fa-search fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="search" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M508.5 468.9L387.1 347.5c-2.3-2.3-5.3-3.5-8.5-3.5h-13.2c31.5-36.5 50.6-84 50.6-136C416 93.1 322.9 0 208 0S0 93.1 0 208s93.1 208 208 208c52 0 99.5-19.1 136-50.6v13.2c0 3.2 1.3 6.2 3.5 8.5l121.4 121.4c4.7 4.7 12.3 4.7 17 0l22.6-22.6c4.7-4.7 4.7-12.3 0-17zM208 368c-88.4 0-160-71.6-160-160S119.6 48 208 48s160 71.6 160 160-71.6 160-160 160z"></path></svg><!-- <i class="fa fa-fw fa-search"></i> -->
      <input id="search-input" placeholder="搜索模型或数据组" aria-label="搜索模型或数据组" class="form-control">
      <div id="search-closer"><svg class="svg-inline--fa fa-times fa-w-12 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="times" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M231.6 256l130.1-130.1c4.7-4.7 4.7-12.3 0-17l-22.6-22.6c-4.7-4.7-12.3-4.7-17 0L192 216.4 61.9 86.3c-4.7-4.7-12.3-4.7-17 0l-22.6 22.6c-4.7 4.7-4.7 12.3 0 17L152.4 256 22.3 386.1c-4.7 4.7-4.7 12.3 0 17l22.6 22.6c4.7 4.7 12.3 4.7 17 0L192 295.6l130.1 130.1c4.7 4.7 12.3 4.7 17 0l22.6-22.6c4.7-4.7 4.7-12.3 0-17L231.6 256z"></path></svg><!-- <i class="fa fa-fw fa-times"></i> -->
      </div>
    </div>
  </div>
</nav>
        <div id="info-banner" class="info-banner top full-width"></div>
      </div>
      <div id="page-content">
<div id="content-root"><div><div class="top-section"><div class="wrapper"><div class="restop-title-row"><h1><a class="title-user" href="https://www.tinymind.com/evolution23">evolution23</a><svg class="svg-inline--fa fa-angle-right fa-w-6 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="angle-right" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 192 512"><path fill="currentColor" d="M187.8 264.5L41 412.5c-4.7 4.7-12.3 4.7-17 0L4.2 392.7c-4.7-4.7-4.7-12.3 0-17L122.7 256 4.2 136.3c-4.7-4.7-4.7-12.3 0-17L24 99.5c4.7-4.7 12.3-4.7 17 0l146.8 148c4.7 4.7 4.7 12.3 0 17z"></path></svg><!-- <i class="fa fa-fw fa-angle-right"></i> --><a class="title-user" href="https://www.tinymind.com/evolution23/w8-densenet">w8-densenet</a><svg class="svg-inline--fa fa-angle-right fa-w-6 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="angle-right" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 192 512"><path fill="currentColor" d="M187.8 264.5L41 412.5c-4.7 4.7-12.3 4.7-17 0L4.2 392.7c-4.7-4.7-4.7-12.3 0-17L122.7 256 4.2 136.3c-4.7-4.7-4.7-12.3 0-17L24 99.5c4.7-4.7 12.3-4.7 17 0l146.8 148c4.7 4.7 4.7 12.3 0 17z"></path></svg><!-- <i class="fa fa-fw fa-angle-right"></i> --><span class="title-name"><span>Exec #3</span><span class="edit-desc"><svg class="svg-inline--fa fa-pencil fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="pencil" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M491.609 73.625l-53.861-53.839c-26.378-26.379-69.076-26.383-95.46-.001L24.91 335.089.329 484.085c-2.675 16.215 11.368 30.261 27.587 27.587l148.995-24.582 315.326-317.378c26.33-26.331 26.581-68.879-.628-96.087zM120.644 302l170.259-169.155 88.251 88.251L210 391.355V350h-48v-48h-41.356zM82.132 458.132l-28.263-28.263 12.14-73.587L84.409 338H126v48h48v41.59l-18.282 18.401-73.586 12.141zm378.985-319.533l-.051.051-.051.051-48.03 48.344-88.03-88.03 48.344-48.03.05-.05.05-.05c9.147-9.146 23.978-9.259 33.236-.001l53.854 
53.854c9.878 9.877 9.939 24.549.628 33.861z"></path></svg><!-- <i class="fa fa-fw fa-pencil"></i> --></span></span></h1></div><ul><li class="a active" data-key="overview"><svg class="svg-inline--fa fa-info fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="info" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M256 8C119.043 8 8 119.083 8 256c0 136.997 111.043 248 248 248s248-111.003 248-248C504 119.083 392.957 8 256 8zm0 448c-110.532 0-200-89.431-200-200 0-110.495 89.472-200 200-200 110.491 0 200 89.471 200 200 0 110.53-89.431 200-200 200zm0-338c23.196 0 42 18.804 42 42s-18.804 42-42 42-42-18.804-42-42 18.804-42 42-42zm56 254c0 6.627-5.373 12-12 12h-88c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h12v-64h-12c-6.627 0-12-5.373-12-12v-24c0-6.627 5.373-12 12-12h64c6.627 0 12 5.373 12 12v100h12c6.627 0 12 5.373 12 12v24z"></path></svg><!-- <i class="fa fa-fw fa-info"></i> --><span class="tab-title">概览</span></li><li class="a" data-key="charts"><svg class="svg-inline--fa fa-chart-bar fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="chart-bar" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M500 400c6.6 0 12 5.4 12 12v24c0 6.6-5.4 12-12 12H12c-6.6 0-12-5.4-12-12V76c0-6.6 5.4-12 12-12h24c6.6 0 12 5.4 12 12v324h452zm-356-60v-72c0-6.6-5.4-12-12-12h-24c-6.6 0-12 5.4-12 12v72c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12zm96 0V140c0-6.6-5.4-12-12-12h-24c-6.6 0-12 5.4-12 12v200c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12zm96 0V204c0-6.6-5.4-12-12-12h-24c-6.6 0-12 5.4-12 12v136c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12zm96 0V108c0-6.6-5.4-12-12-12h-24c-6.6 0-12 5.4-12 12v232c0 6.6 5.4 12 12 12h24c6.6 0 12-5.4 12-12z"></path></svg><!-- <i class="fa fa-fw fa-chart-bar"></i> --><span class="tab-title">图表</span></li><li class="a" data-key="code"><svg class="svg-inline--fa fa-code fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="code" role="img" 
xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20"><path fill="currentColor" d="M6 6c0-0.55-0.45-1-1-1C4.72 5 4.47 5.11 4.29 5.29l-4 4C0.11 9.47 0 9.72 0 10c0 0.28 0.11 0.53 0.29 0.71l4 4C4.47 14.89 4.72 15 5 15c0.55 0 1-0.45 1-1c0-0.28-0.11-0.53-0.29-0.71L2.41 10l3.29-3.29C5.89 6.53 6 6.28 6 6z M12 2c-0.46 0-0.83 0.31-0.95 0.73l-4 14C7.03 16.82 7 16.9 7 17c0 0.55 0.45 1 1 1c0.46 0 0.83-0.31 0.95-0.73l4-14C12.97 3.18 13 3.1 13 3C13 2.45 12.55 2 12 2z M19.71 9.29l-4-4C15.53 5.11 15.28 5 15 5c-0.55 0-1 0.45-1 1c0 0.28 0.11 0.53 0.29 0.71L17.59 10l-3.29 3.29C14.11 13.47 14 13.72 14 14c0 0.55 0.45 1 1 1c0.28 0 0.53-0.11 0.71-0.29l4-4C19.89 10.53 20 10.28 20 10C20 9.72 19.89 9.47 19.71 9.29z"></path></svg><!-- <i class="fa fa-fw fa-code"></i> --><span class="tab-title">代码</span></li><li class="a" data-key="output"><svg class="svg-inline--fa fa-truck fa-w-20 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="truck" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><path fill="currentColor" d="M592 0H272c-26.51 0-48 21.49-48 48v48h-44.118a48 48 0 0 0-33.941 14.059l-99.882 99.882A48 48 0 0 0 32 243.882V368H20c-6.627 0-12 5.373-12 12v24c0 6.627 5.373 12 12 12h44c0 53.019 42.981 96 96 96s96-42.981 96-96h128c0 53.019 42.981 96 96 96s96-42.981 96-96h16c26.51 0 48-21.49 48-48V48c0-26.51-21.49-48-48-48zM160 464c-26.467 0-48-21.533-48-48s21.533-48 48-48 48 21.533 48 48-21.533 48-48 48zm64-119.547C207.015 329.249 184.589 320 160 320c-33.395 0-62.802 17.055-80 42.926V243.882L179.882 144H224v200.453zM480 464c-26.467 0-48-21.533-48-48s21.533-48 48-48 48 21.533 48 48-21.533 48-48 48zm112-96h-28.846c-16.599-28.694-47.621-48-83.154-48s-66.555 19.306-83.154 48H272V48h320v320zM112 256l80-80v80h-80z"></path></svg><!-- <i class="fa fa-fw fa-truck"></i> --><span class="tab-title">输出</span></li><li class="a" data-key="settings"><svg class="svg-inline--fa fa-cog fa-w-16 fa-fw" aria-hidden="true" data-prefix="fa" data-icon="cog" role="img" 
xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M452.515 237l31.843-18.382c9.426-5.441 13.996-16.542 11.177-27.054-11.404-42.531-33.842-80.547-64.058-110.797-7.68-7.688-19.575-9.246-28.985-3.811l-31.785 18.358a196.276 196.276 0 0 0-32.899-19.02V39.541a24.016 24.016 0 0 0-17.842-23.206c-41.761-11.107-86.117-11.121-127.93-.001-10.519 2.798-17.844 12.321-17.844 23.206v36.753a196.276 196.276 0 0 0-32.899 19.02l-31.785-18.358c-9.41-5.435-21.305-3.877-28.985 3.811-30.216 30.25-52.654 68.265-64.058 110.797-2.819 10.512 1.751 21.613 11.177 27.054L59.485 237a197.715 197.715 0 0 0 0 37.999l-31.843 18.382c-9.426 5.441-13.996 16.542-11.177 27.054 11.404 42.531 33.842 80.547 64.058 110.797 7.68 7.688 19.575 9.246 28.985 3.811l31.785-18.358a196.202 196.202 0 0 0 32.899 19.019v36.753a24.016 24.016 0 0 0 17.842 23.206c41.761 11.107 86.117 11.122 127.93.001 10.519-2.798 17.844-12.321 17.844-23.206v-36.753a196.34 196.34 0 0 0 32.899-19.019l31.785 18.358c9.41 5.435 21.305 3.877 28.985-3.811 30.216-30.25 52.654-68.266 64.058-110.797 2.819-10.512-1.751-21.613-11.177-27.054L452.515 275c1.22-12.65 1.22-25.35 0-38zm-52.679 63.019l43.819 25.289a200.138 200.138 0 0 1-33.849 58.528l-43.829-25.309c-31.984 27.397-36.659 30.077-76.168 44.029v50.599a200.917 200.917 0 0 1-67.618 0v-50.599c-39.504-13.95-44.196-16.642-76.168-44.029l-43.829 25.309a200.15 200.15 0 0 1-33.849-58.528l43.819-25.289c-7.63-41.299-7.634-46.719 0-88.038l-43.819-25.289c7.85-21.229 19.31-41.049 33.849-58.529l43.829 25.309c31.984-27.397 36.66-30.078 76.168-44.029V58.845a200.917 200.917 0 0 1 67.618 0v50.599c39.504 13.95 44.196 16.642 76.168 44.029l43.829-25.309a200.143 200.143 0 0 1 33.849 58.529l-43.819 25.289c7.631 41.3 7.634 46.718 0 88.037zM256 160c-52.935 0-96 43.065-96 96s43.065 96 96 96 96-43.065 96-96-43.065-96-96-96zm0 144c-26.468 0-48-21.532-48-48 0-26.467 21.532-48 48-48s48 21.533 48 48c0 26.468-21.532 48-48 48z"></path></svg><!-- <i class="fa fa-fw fa-cog"></i> --><span 
class="tab-title">设置</span></li></ul></div></div><div class="main-section"><div><div class="wrapper mainside overview-tab"><div class="sidebar"><div class="card"><div class="section-header"><h4>概览</h4></div><div class="section"><div><span class="meta-label">状态</span><span class="meta-value status killed">终止</span></div><div><span class="meta-label">由</span><span class="meta-value"><a href="https://www.tinymind.com/evolution23">evolution23</a></span></div><div><span class="meta-label">开始于</span><span class="meta-value">16 分钟前</span></div><div><span class="meta-label">时长</span><span class="meta-value">16 分钟</span></div><div><span class="meta-label">资源</span><span class="meta-value">CPU 2</span></div></div><div class="section env-section"><h5>环境</h5><div class="env-entry"><div class="env-label" style="color: rgb(53, 114, 165);"><div class="env-dot" style="background-color: rgb(53, 114, 165);"></div>Python 3.6</div></div><div class="env-entry"><div class="env-label" style="color: rgb(239, 108, 0);"><div class="env-dot" style="background-color: rgb(239, 108, 0);"></div>TensorFlow 1.4</div></div></div><div class="section"><h5>事件</h5><div class="event-row"><svg class="svg-inline--fa fa-plus-circle fa-w-16 fa-fw created" aria-hidden="true" data-prefix="fa" data-icon="plus-circle" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M384 240v32c0 6.6-5.4 12-12 12h-88v88c0 6.6-5.4 12-12 12h-32c-6.6 0-12-5.4-12-12v-88h-88c-6.6 0-12-5.4-12-12v-32c0-6.6 5.4-12 12-12h88v-88c0-6.6 5.4-12 12-12h32c6.6 0 12 5.4 12 12v88h88c6.6 0 12 5.4 12 12zm120 16c0 137-111 248-248 248S8 393 8 256 119 8 256 8s248 111 248 248zm-48 0c0-110.5-89.5-200-200-200S56 145.5 56 256s89.5 200 200 200 200-89.5 200-200z"></path></svg><!-- <i class="fa fa-fw fa-plus-circle created"></i> --><div class="event-content"><div class="event-ts">3:25 下午, 1月 21</div><div class="event-name created">已创建</div></div></div><div class="event-row"><svg class="svg-inline--fa fa-rocket 
fa-w-16 fa-fw starting" aria-hidden="true" data-prefix="fa" data-icon="rocket" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M512 112c0-36.8-.8-47.2-11-89.1-1.4-5.8-5.9-10.3-11.7-11.8C451.4 1.8 440 0 400 0c-68.3 0-127.4 39.5-177 96H96c-15.2 0-29 8.6-35.8 22.1l-56 112C-9 256.7 10.3 288 40.1 288h66.3c-6.7 15-12.9 29.8-18.6 44.1-2.4 5.9-1 12.7 3.6 17.2l71.3 71.3c4.5 4.5 11.3 5.9 17.2 3.6 14.3-5.7 29.1-11.9 44.1-18.6v66.3c0 29.7 31.3 49.1 57.9 35.8l112-56c13.6-6.8 22.1-20.6 22.1-35.8V289c56.5-49.6 96-108.7 96-177zM53 240l48-96h84.7c-21.1 30.3-39.9 63.1-56.6 96H53zm87.1 90.2C196.8 191.1 293 48 400.1 48c22.6 0 34.7 0 58.8 5.2 5.1 24 5.1 36.2 5.1 58.8 0 107.1-143.1 203.2-282.2 259.9l-41.7-41.7zM368 411l-96 48v-76.2c32.9-16.6 65.7-35.5 96-56.6V411zm0-315c26.5 0 48 21.5 48 48s-21.5 48-48 48-48-21.5-48-48 21.5-48 48-48z"></path></svg><!-- <i class="fa fa-fw fa-rocket starting"></i> --><div class="event-content"><div class="event-ts">3:25 下午, 1月 21</div><div class="event-name starting">启动中</div></div></div><div class="event-row"><svg class="svg-inline--fa fa-industry fa-w-16 fa-fw building" aria-hidden="true" data-prefix="fa" data-icon="industry" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M475.115 131.752L336 220.28V152c0-18.916-20.931-30.399-36.885-20.248L160 220.28V56c0-13.255-10.745-24-24-24H24C10.745 32 0 42.745 0 56v400c0 13.255 10.745 24 24 24h464c13.255 0 24-10.745 24-24V152c0-18.917-20.931-30.399-36.885-20.248zM464 432H48V80h64v184c0 18.916 20.931 30.399 36.885 20.248L288 195.72V264c0 18.915 20.931 30.399 36.885 20.248L464 195.72V432zm-60-48h-40c-6.627 0-12-5.373-12-12v-40c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12zm-128 0h-40c-6.627 0-12-5.373-12-12v-40c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40c0 6.627-5.373 12-12 12zm-128 0h-40c-6.627 0-12-5.373-12-12v-40c0-6.627 5.373-12 12-12h40c6.627 0 12 5.373 12 12v40c0 6.627-5.373 
12-12 12z"></path></svg><!-- <i class="fa fa-fw fa-industry building"></i> --><div class="event-content"><div class="event-ts">3:26 下午, 1月 21</div><div class="event-name building">配置环境</div></div></div><div class="event-row"><svg class="svg-inline--fa fa-running fa-w-16 fa-fw running" aria-hidden="true" data-prefix="fa" data-icon="running" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path fill="currentColor" d="M21,11 C20.45,11 20,11.45 20,12 C20,16.42 16.42,20 12,20 C9.48,20 7.24,18.82 5.78,17 L7,17 C7.55,17 8,16.55 8,16 C8,15.45 7.55,15 7,15 L3,15 C2.45,15 2,15.45 2,16 L2,20 C2,20.55 2.45,21 3,21 C3.55,21 4,20.55 4,20 L4,17.94 C5.82,20.39 8.71,22 12,22 C17.52,22 22,17.52 22,12 C22,11.45 21.55,11 21,11 M21,3 C20.45,3 20,3.45 20,4 L20,6.06 C18.18,3.61 15.29,2 12,2 C6.48,2 2,6.48 2,12 C2,12.55 2.45,13 3,13 C3.55,13 4,12.55 4,12 C4,7.58 7.58,4 12,4 C14.52,4 16.76,5.18 18.22,7 L17,7 C16.45,7 16,7.45 16,8 C16,8.55 16.45,9 17,9 L21,9 C21.55,9 22,8.55 22,8 L22,4 C22,3.45 21.55,3 21,3"></path></svg><!-- <i class="fa fa-fw fa-running running"></i> --><div class="event-content"><div class="event-ts">3:29 下午, 1月 21</div><div class="event-name running">运行中</div></div></div><div class="event-row"><svg class="svg-inline--fa fa-save fa-w-14 fa-fw saving" aria-hidden="true" data-prefix="fa" data-icon="save" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path fill="currentColor" d="M433.941 129.941l-83.882-83.882A48 48 0 0 0 316.118 32H48C21.49 32 0 53.49 0 80v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V163.882a48 48 0 0 0-14.059-33.941zM272 80v80H144V80h128zm122 352H54a6 6 0 0 1-6-6V86a6 6 0 0 1 6-6h42v104c0 13.255 10.745 24 24 24h176c13.255 0 24-10.745 24-24V83.882l78.243 78.243a6 6 0 0 1 1.757 4.243V426a6 6 0 0 1-6 6zM224 232c-48.523 0-88 39.477-88 88s39.477 88 88 88 88-39.477 88-88-39.477-88-88-88zm0 128c-22.056 0-40-17.944-40-40s17.944-40 40-40 40 17.944 40 40-17.944 40-40 40z"></path></svg><!-- <i class="fa fa-fw fa-save 
saving"></i> --><div class="event-content"><div class="event-ts">3:41 下午, 1月 21</div><div class="event-name saving">保存中</div></div></div><div class="event-row"><svg class="svg-inline--fa fa-stop-circle fa-w-16 fa-fw killed" aria-hidden="true" data-prefix="fa" data-icon="stop-circle" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M504 256C504 119 393 8 256 8S8 119 8 256s111 248 248 248 248-111 248-248zm-448 0c0-110.5 89.5-200 200-200s200 89.5 200 200-89.5 200-200 200S56 366.5 56 256zm296-80v160c0 8.8-7.2 16-16 16H176c-8.8 0-16-7.2-16-16V176c0-8.8 7.2-16 16-16h160c8.8 0 16 7.2 16 16z"></path></svg><!-- <i class="fa fa-fw fa-stop-circle killed"></i> --><div class="event-content"><div class="event-ts">3:41 下午, 1月 21</div><div class="event-name killed">终止</div></div></div></div></div></div><div class="main main-left"><h3>参数</h3><div class="params"><div class="param-section"><div class="param-group"><div class="param param-display"><code class="param-name">learning_rate</code><div class="param-value">0.1</div></div><div class="param param-display"><code class="param-name">batch_size</code><div class="param-value">32</div></div><div class="param param-display"><code class="param-name">output_dir</code><div class="param-value">/output</div></div><div class="param param-display"><code class="param-name">dataset_name</code><div class="param-value">quiz</div></div><div class="param param-display"><code class="param-name">dataset_dir</code><div class="param-value">/data/ai100/quiz-w7</div></div><div class="param param-display"><code class="param-name">checkpoint_path</code><div class="param-value"> /output/ckpt</div></div><div class="param param-display"><code class="param-name">model_name</code><div class="param-value">densenet</div></div><div class="param param-display"><code class="param-name">checkpoint_exclude_scopes</code><div class="param-value">InceptionV4/AuxLogits/Aux_logits</div></div><div class="param 
param-display"><code class="param-name">train_dir</code><div class="param-value">/output/ckpt</div></div><div class="param param-display"><code class="param-name">optimizer</code><div class="param-value">rmsprop</div></div><div class="param param-display"><code class="param-name">dataset_split_name</code><div class="param-value">validation</div></div><div class="param param-display"><code class="param-name">eval_dir</code><div class="param-value">/output/eval</div></div><div class="param param-display"><code class="param-name">max_num_batches</code><div class="param-value">128</div></div></div></div></div><hr><div><div class="section-header"><h3>日志</h3></div><div class="secondary">当前显示输出日志 (stdout). <span></span><span class="a">显示错误日志 (stderr)</span></div><pre>.6/site-packages/tensorflow/contrib/layers/python/layers/layers.py", line 1410, in dropout
    outputs = layer.apply(inputs, training=is_training)
  File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 671, in apply
    return self.__call__(inputs, *args, **kwargs)
  File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 575, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/layers/core.py", line 300, in call
    lambda: array_ops.identity(inputs))

InvalidArgumentError (see above for traceback): Cannot assign a device for operation 'gradients/densenet/block_1_conv1x10_dropout/dropout/div_grad/BroadcastGradientArgs': Operation was explicitly assigned to /device:GPU:0 but available devices are [ /job:localhost/replica:0/task:0/device:CPU:0, /job:localhost/replica:0/task:0/device:XLA_CPU:0 ]. Make sure the device specification refers to a valid device.
	 [[Node: gradients/densenet/block_1_conv1x10_dropout/dropout/div_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _device="/device:GPU:0"](gradients/densenet/block_1_conv1x10_dropout/dropout/div_grad/Shape, gradients/densenet/block_1_conv1x10_dropout/dropout/div_grad/Shape_1)]]

################    eval    ################
WARNING:tensorflow:From ./eval_image_classifier.py:91: get_or_create_global_step (from tensorflow.contrib.framework.python.ops.variables) is deprecated and will be removed in a future version.
Instructions for updating:
Please switch to tf.train.get_or_create_global_step
WARNING:tensorflow:From ./eval_image_classifier.py:157: streaming_recall_at_k (from tensorflow.contrib.metrics.python.ops.metric_ops) is deprecated and will be removed after 2016-11-08.
Instructions for updating:
Please use `streaming_sparse_recall_at_k`, and reshape labels from [batch_size] to [batch_size, 1].
INFO:tensorflow:Evaluating None
INFO:tensorflow:Starting evaluation at 2019-01-21-07:30:16
INFO:tensorflow:Evaluation [1/128]
INFO:tensorflow:Evaluation [2/128]
INFO:tensorflow:Evaluation [3/128]
INFO:tensorflow:Evaluation [4/128]
INFO:tensorflow:Evaluation [5/128]
INFO:tensorflow:Evaluation [6/128]
INFO:tensorflow:Evaluation [7/128]
INFO:tensorflow:Evaluation [8/128]
INFO:tensorflow:Evaluation [9/128]
INFO:tensorflow:Evaluation [10/128]
INFO:tensorflow:Evaluation [11/128]
INFO:tensorflow:Evaluation [12/128]
INFO:tensorflow:Evaluation [13/128]
INFO:tensorflow:Evaluation [14/128]
INFO:tensorflow:Evaluation [15/128]
INFO:tensorflow:Evaluation [16/128]
INFO:tensorflow:Evaluation [17/128]
INFO:tensorflow:Evaluation [18/128]
INFO:tensorflow:Evaluation [19/128]
INFO:tensorflow:Evaluation [20/128]
INFO:tensorflow:Evaluation [21/128]
INFO:tensorflow:Evaluation [22/128]
INFO:tensorflow:Evaluation [23/128]
INFO:tensorflow:Evaluation [24/128]
INFO:tensorflow:Evaluation [25/128]
INFO:tensorflow:Evaluation [26/128]
INFO:tensorflow:Evaluation [27/128]
INFO:tensorflow:Evaluation [28/128]
INFO:tensorflow:Evaluation [29/128]
INFO:tensorflow:Evaluation [30/128]
INFO:tensorflow:Evaluation [31/128]
INFO:tensorflow:Evaluation [32/128]
INFO:tensorflow:Evaluation [33/128]
INFO:tensorflow:Evaluation [34/128]
INFO:tensorflow:Evaluation [35/128]
INFO:tensorflow:Evaluation [36/128]
INFO:tensorflow:Evaluation [37/128]
INFO:tensorflow:Evaluation [38/128]
INFO:tensorflow:Evaluation [39/128]
INFO:tensorflow:Evaluation [40/128]
INFO:tensorflow:Evaluation [41/128]
INFO:tensorflow:Evaluation [42/128]
INFO:tensorflow:Evaluation [43/128]
INFO:tensorflow:Evaluation [44/128]
INFO:tensorflow:Evaluation [45/128]
INFO:tensorflow:Evaluation [46/128]
INFO:tensorflow:Evaluation [47/128]
INFO:tensorflow:Evaluation [48/128]
INFO:tensorflow:Evaluation [49/128]
INFO:tensorflow:Evaluation [50/128]
INFO:tensorflow:Evaluation [51/128]
INFO:tensorflow:Evaluation [52/128]
INFO:tensorflow:Evaluation [53/128]
INFO:tensorflow:Evaluation [54/128]
INFO:tensorflow:Evaluation [55/128]
INFO:tensorflow:Evaluation [56/128]
INFO:tensorflow:Evaluation [57/128]
INFO:tensorflow:Evaluation [58/128]
INFO:tensorflow:Evaluation [59/128]
INFO:tensorflow:Evaluation [60/128]
INFO:tensorflow:Evaluation [61/128]
INFO:tensorflow:Evaluation [62/128]
INFO:tensorflow:Evaluation [63/128]
INFO:tensorflow:Evaluation [64/128]
INFO:tensorflow:Evaluation [65/128]
INFO:tensorflow:Evaluation [66/128]
INFO:tensorflow:Evaluation [67/128]
INFO:tensorflow:Evaluation [68/128]
INFO:tensorflow:Evaluation [69/128]
INFO:tensorflow:Evaluation [70/128]
INFO:tensorflow:Evaluation [71/128]
INFO:tensorflow:Evaluation [72/128]
INFO:tensorflow:Evaluation [73/128]
INFO:tensorflow:Evaluation [74/128]
INFO:tensorflow:Evaluation [75/128]
INFO:tensorflow:Evaluation [76/128]
INFO:tensorflow:Evaluation [77/128]
INFO:tensorflow:Evaluation [78/128]
INFO:tensorflow:Evaluation [79/128]
INFO:tensorflow:Evaluation [80/128]
INFO:tensorflow:Evaluation [81/128]
INFO:tensorflow:Evaluation [82/128]
INFO:tensorflow:Evaluation [83/128]
INFO:tensorflow:Evaluation [84/128]
INFO:tensorflow:Evaluation [85/128]
INFO:tensorflow:Evaluation [86/128]
INFO:tensorflow:Evaluation [87/128]
INFO:tensorflow:Evaluation [88/128]
INFO:tensorflow:Evaluation [89/128]
INFO:tensorflow:Evaluation [90/128]
INFO:tensorflow:Evaluation [91/128]
INFO:tensorflow:Evaluation [92/128]
INFO:tensorflow:Evaluation [93/128]
INFO:tensorflow:Evaluation [94/128]
INFO:tensorflow:Evaluation [95/128]
INFO:tensorflow:Evaluation [96/128]
INFO:tensorflow:Evaluation [97/128]
INFO:tensorflow:Evaluation [98/128]
INFO:tensorflow:Evaluation [99/128]
INFO:tensorflow:Evaluation [100/128]
INFO:tensorflow:Evaluation [101/128]
INFO:tensorflow:Evaluation [102/128]
INFO:tensorflow:Evaluation [103/128]
INFO:tensorflow:Evaluation [104/128]
INFO:tensorflow:Evaluation [105/128]
INFO:tensorflow:Evaluation [106/128]
INFO:tensorflow:Evaluation [107/128]
INFO:tensorflow:Evaluation [108/128]
INFO:tensorflow:Evaluation [109/128]
INFO:tensorflow:Evaluation [110/128]
INFO:tensorflow:Evaluation [111/128]
INFO:tensorflow:Evaluation [112/128]
INFO:tensorflow:Evaluation [113/128]
INFO:tensorflow:Evaluation [114/128]
INFO:tensorflow:Evaluation [115/128]
INFO:tensorflow:Evaluation [116/128]
INFO:tensorflow:Evaluation [117/128]
INFO:tensorflow:Evaluation [118/128]
INFO:tensorflow:Evaluation [119/128]
INFO:tensorflow:Evaluation [120/128]
INFO:tensorflow:Evaluation [121/128]
INFO:tensorflow:Evaluation [122/128]
INFO:tensorflow:Evaluation [123/128]
INFO:tensorflow:Evaluation [124/128]
INFO:tensorflow:Evaluation [125/128]
INFO:tensorflow:Evaluation [126/128]
INFO:tensorflow:Evaluation [127/128]
INFO:tensorflow:Evaluation [128/128]
2019-01-21 07:41:18.511187: I tensorflow/core/kernels/logging_ops.cc:79] eval/Recall_5[0.0263671875]
2019-01-21 07:41:18.511187: I tensorflow/core/kernels/logging_ops.cc:79] eval/Accuracy[0.00512695312]
INFO:tensorflow:Finished evaluation at 2019-01-21-07:41:18
################    train    ################
WARNING:tensorflow:From ./train_image_classifier.py:400: create_global_step (from tensorflow.contrib.framework.python.ops.variables) is deprecated and will be removed in a future version.
Instructions for updating:
Please switch to tf.train.create_global_step
WARNING:tensorflow:From ./train_image_classifier.py:468: softmax_cross_entropy (from tensorflow.contrib.losses.python.losses.loss_ops) is deprecated and will be removed after 2016-12-30.
Instructions for updating:
Use tf.losses.softmax_cross_entropy instead. Note that the order of the logits and labels arguments has been changed.
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/contrib/losses/python/losses/loss_ops.py:398: compute_weighted_loss (from tensorflow.contrib.losses.python.losses.loss_ops) is deprecated and will be removed after 2016-12-30.
Instructions for updating:
Use tf.losses.compute_weighted_loss instead.
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/contrib/losses/python/losses/loss_ops.py:151: add_arg_scope.&lt;locals&gt;.func_with_args (from tensorflow.contrib.losses.python.losses.loss_ops) is deprecated and will be removed after 2016-12-30.
Instructions for updating:
Use tf.losses.add_loss instead.
</pre><div class="secondary">当前显示最后几行日志文件。 <span></span><span class="a">加载完整的日志文件</span></div></div></div></div></div></div></div></div>
      </div>
      <div id="page-footer"><footer><span class="copyright">©&nbsp;</span><span>2018 TinyMind</span>
</footer>
      </div>
    </div>
    <script>window.MSGS = {"error.input": "\u65e0\u6cd5\u8bc6\u522b\u60a8\u7684\u8f93\u5165\u3002\u8bf7\u68c0\u67e5\u540e\u518d\u8bd5\u4e00\u6b21\u3002", "error.internal": "\u54e6\u4e0d\uff01\u6211\u4eec\u9047\u5230\u4e86\u4e00\u70b9\u95ee\u9898\uff0c\u6682\u65f6\u65e0\u6cd5\u5904\u7406\u60a8\u7684\u8bf7\u6c42 :( \u8bf7\u7a0d\u540e\u518d\u8bd5\u3002", "error.network": "\ud83d\ude31 \u65e0\u6cd5\u8fde\u63a5 TinyMind \u670d\u52a1\u5668\u3002\u60a8\u6709\u8fde\u4e0a\u7f51\u7edc\u5417\uff1f", "warning.email_verify": "\u6b22\u8fce\u4f7f\u7528 TinyMind\u3002\u6211\u4eec\u7ed9\u60a8\u5bc4\u51fa\u4e86\u90ae\u7bb1\u9a8c\u8bc1\u90ae\u4ef6\u3002\u8bf7\u5728\u5f00\u59cb\u5efa\u7acb\u6a21\u578b\u524d\u5148\u9a8c\u8bc1\u90ae\u7bb1\u5730\u5740 :)", "success.email_verified": "\u60a8\u7684\u90ae\u7bb1\u5730\u5740\u5df2\u9a8c\u8bc1\u3002\u8c22\u8c22\uff01", "error.github.welcome.expired": "\u60a8\u70b9\u51fb\u7684\u94fe\u63a5\u5df2\u7ecf\u8fc7\u671f\u3002\u8bf7\u4f7f\u7528\u767b\u5165\u9875\u9762\u7684\u300c\u5fd8\u8bb0\u5bc6\u7801\u300d\u9009\u9879\u91cd\u7f6e\u60a8\u7684\u5bc6\u7801\u3002", "info.signed_out": "\u60a8\u5df2\u7ecf\u6210\u529f\u767b\u51fa\u3002\u5e0c\u671b\u60a8\u5f88\u5feb\u56de\u6765\uff01", "info.model.cloned": "\u60a8\u7684\u6a21\u578b\u5df2\u7ecf\u590d\u5236\u6210\u529f\u3002\u6211\u4eec\u8fd8\u5728\u590d\u5236\u6a21\u578b\u7684\u4ee3\u7801\u3002\u8fd9\u4e2a\u8fc7\u7a0b\u53ef\u80fd\u9700\u8981\u6700\u591a\u4e94\u5206\u949f\u3002"}
window.I18N = {"shared.datasets": "\u6570\u636e\u96c6", "exec.log.refreshing": "\u5b9e\u65f6\u66f4\u65b0\u4e2d\u3002\u60a8\u65e0\u9700\u5237\u65b0\u9875\u9762\u3002", "exec.delete.desc": "\u7ed3\u679c\u4e0d\u7406\u60f3\u5417\uff1f\u6216\u8bb8\u53ef\u4ee5\u53ea\u5220\u9664\u6587\u4ef6\uff0c\u4f46\u4fdd\u7559\u53c2\u6570\u548c\u5176\u4ed6\u4fe1\u606f\uff0c\u4ee5\u4f9b\u4ee5\u540e\u53c2\u8003\u3002\u60a8\u53ef\u5728\u6b64\u5904\u5f7b\u5e95\u5220\u9664\u8fd9\u4e2a\u8fd0\u884c\u3002", "exec.output": "\u8f93\u51fa", "shared.events": "\u4e8b\u4ef6", "shared.started": "\u5f00\u59cb\u4e8e", "file.empty_file": "\u8fd9\u4e2a\u6587\u4ef6\u662f\u7a7a\u7684\u3002", "shared.danger_zone": "\u5371\u9669\u533a\u57df", "shared.cancel": "\u53d6\u6d88", "shared.environment": "\u73af\u5883", "shared.by": "\u7531", "shared.save_changes": "\u4fdd\u5b58\u66f4\u6539", "exec.kill": "\u7ec8\u6b62\u8fd0\u884c", "param.no_specified": "\u672a\u5b9a\u4e49\u4efb\u4f55\u53c2\u6570", "exec.s.created": "\u5df2\u521b\u5efa", "exec.no_output.title": "\u5c1a\u65e0\u8f93\u51fa", "exec.s.queued": "\u7b49\u5f85\u4e2d", "shared.no_files.desc": "\u8fd9\u91cc\u6ca1\u6709\u4efb\u4f55\u6587\u4ef6\u3002", "exec.kill.desc": "<b>\u60a8\u5373\u5c06\u7ec8\u6b62\u8fd9\u4e2a\u8fd0\u884c\u3002</b>\u5b83\u5c06\u7acb\u523b\u88ab\u505c\u6b62\u3002\u6240\u6709\u5df2\u7ecf\u4ea7\u751f\u7684\u8f93\u51fa\u548c\u56fe\u8868\u4f1a\u88ab\u81ea\u52a8\u4fdd\u5b58\u3002\u8fd0\u884c\u7ec8\u6b62\u4e4b\u540e\u4e0d\u53ef\u4ee5\u91cd\u542f\u3002", "exec.log.showing_stdout": "\u5f53\u524d\u663e\u793a\u8f93\u51fa\u65e5\u5fd7 (stdout).", "shared.parameters": "\u53c2\u6570", "exec.log.show_stdout": "\u663e\u793a\u8f93\u51fa\u65e5\u5fd7 (stdout)", "exec.s.killed": "\u7ec8\u6b62", "exec.s.succeeded": "\u6210\u529f", "file.empty": "\u7a7a\u7684\u6587\u4ef6\u5939", "exec.s.starting": "\u542f\u52a8\u4e2d", "exec.log.load_full": "\u52a0\u8f7d\u5b8c\u6574\u7684\u65e5\u5fd7\u6587\u4ef6", "exec.delete.confirm": "\u786e\u8ba4\u5220\u9664", 
"shared.overview": "\u6982\u89c8", "shared.no_files": "\u6ca1\u6709\u6587\u4ef6", "file.delete.desc": "\u60a8\u5c06\u8981\u5220\u9664 %file%\u3002\u8fd9\u4e2a\u64cd\u4f5c\u662f\u6c38\u4e45\u6027\u7684\u3002", "exec.log.showing_stderr": "\u5f53\u524d\u663e\u793a\u9519\u8bef\u65e5\u5fd7 (stderr).", "exec.s.building": "\u914d\u7f6e\u73af\u5883", "exec.s.running": "\u8fd0\u884c\u4e2d", "exec.no_output.desc": "\u60a8\u7684\u8fd0\u884c\u5c1a\u672a\u5b8c\u6210\u3002\u5728\u8fd0\u884c\u7ed3\u675f\u540e\uff0c\u60a8\u53ef\u4e8e\u6b64\u67e5\u770b\u5b83\u7684\u8f93\u51fa\u3002\u4f11\u606f\u4e00\u4e0b\uff0c\u7a0d\u540e\u518d\u6765\u5427\uff01", "shared.summary": "\u6982\u89c8", "exec.desc": "\u6a21\u578b\u7684\u7b80\u77ed\u63cf\u8ff0", "shared.done": "\u5b8c\u6210", "exec.no_events": "\u5c1a\u672a\u6709\u4efb\u4f55\u4e8b\u4ef6\u88ab\u8bb0\u5f55", "exec.chart.none": "\u6ca1\u6709\u56fe\u8868", "exec.kill.confirm": "\u786e\u8ba4\u7ec8\u6b62\u8fd0\u884c", "exec.deps.show": "\u663e\u793a\u4f9d\u8d56", "exec.charts": "\u56fe\u8868", "exec.log.show_stderr": "\u663e\u793a\u9519\u8bef\u65e5\u5fd7 (stderr)", "file.1": "\u6587\u4ef6\u5939\u4e2d\u6709\u4e00\u4e2a\u6587\u4ef6", "file.multi": "\u6587\u4ef6\u5939\u4e2d\u6709 %count% \u4e2a\u6587\u4ef6", "shared.status": "\u72b6\u6001", "exec.s.failed": "\u5931\u8d25", "shared.no_summary": "shared.no_summary", "shared.code": "\u4ee3\u7801", "shared.results": "\u7ed3\u679c", "shared.settings": "\u8bbe\u7f6e", "shared.resource": "\u8d44\u6e90", "exec.delete": "\u5220\u9664\u8fd0\u884c", "file.delete.confirm": "\u786e\u8ba4\u5220\u9664", "exec.log.last_few_line": "\u5f53\u524d\u663e\u793a\u6700\u540e\u51e0\u884c\u65e5\u5fd7\u6587\u4ef6\u3002", "exec.s.saving": "\u4fdd\u5b58\u4e2d", "exec.delete.confirm_desc": 
"\u60a8\u5c06\u5f7b\u5e95\u5220\u9664\u8fd9\u4e2a\u8fd0\u884c\u3002\u6240\u6709\u6709\u5173\u4fe1\u606f\u548c\u6587\u4ef6\u90fd\u5c06\u88ab\u6c38\u4e45\u5220\u9664\u3002\u8bf7\u6ce8\u610f\uff1a\u8fd9\u4e2a\u64cd\u4f5c\u662f\u4e0d\u53ef\u9006\u7684\u3002", "exec.deps.hide": "\u9690\u85cf\u4f9d\u8d56", "exec.chart.none.desc": "\u6b64\u8fd0\u884c\u6682\u65e0\u56fe\u8868\u3002\u5982\u679c\u60a8\u6709\u8f93\u51fa\u56fe\u8868\uff0c\u8bf7\u7a0d\u540e\u518d\u67e5\u770b\u3002", "shared.duration": "\u65f6\u957f", "exec.log": "\u65e5\u5fd7"}
    </script>

    <script src="./w8_densenet_files/main-e3fcaefc61.js.下载"></script>
<script>window.data = {"user_id": 1641, "model_id": 3309, "model_name": "w8-densenet", "instance_id": 20, "seq": 3, "subseq": null, "batch_size": null, "prev_token": null, "prev_mode": null, "status": "killed", "token": "zremsqku", "batch_token": null, "summary": null, "env": {"deps": null, "language": {"name": "python", "version": "3.6"}, "framework": {"name": "tensorflow", "version": "1.4"}}, "code": {"mode": "github", "branch": {"sha": "cf00de3da5193592bcf5d9e8dc12116c943f521b", "url": "https://github.com/evolution232634/densenet/commit/cf00de3da5193592bcf5d9e8dc12116c943f521b", "name": "master", "message": "\u7b2c\u516b\u5468\u4f5c\u4e1a\u63d0\u4ea4"}, "repo_id": 166633597, "repo_name": "evolution232634/densenet", "entrypoint": "train_eval_image_classifier.py", "archive_url": null, "archive_state": null}, "code_version": 1, "code_changed": false, "resource": "cpu", "resource_count": 2, "params": [{"end": null, "name": "learning_rate", "step": null, "type": "float", "count": 1, "start": null, "value": 0.1, "values": []}, {"end": null, "name": "batch_size", "step": null, "type": "int", "count": 1, "start": null, "value": 32, "values": []}, {"end": null, "name": "output_dir", "step": null, "type": "string", "count": 1, "start": null, "value": "/output", "values": []}, {"end": null, "name": "dataset_name", "step": null, "type": "string", "count": 1, "start": null, "value": "quiz", "values": []}, {"end": null, "name": "dataset_dir", "step": null, "type": "string", "count": 1, "start": null, "value": "/data/ai100/quiz-w7", "values": []}, {"end": null, "name": "checkpoint_path", "step": null, "type": "string", "count": 1, "start": null, "value": " /output/ckpt", "values": []}, {"end": null, "name": "model_name", "step": null, "type": "string", "count": 1, "start": null, "value": "densenet", "values": []}, {"end": null, "name": "checkpoint_exclude_scopes", "step": null, "type": "string", "count": 1, "start": null, "value": "InceptionV4/AuxLogits/Aux_logits", "values": 
[]}, {"end": null, "name": "train_dir", "step": null, "type": "string", "count": 1, "start": null, "value": "/output/ckpt", "values": []}, {"end": null, "name": "optimizer", "step": null, "type": "string", "count": 1, "start": null, "value": "rmsprop", "values": []}, {"end": null, "name": "dataset_split_name", "step": null, "type": "string", "count": 1, "start": null, "value": "validation", "values": []}, {"end": null, "name": "eval_dir", "step": null, "type": "string", "count": 1, "start": null, "value": "/output/eval", "values": []}, {"end": null, "name": "max_num_batches", "step": null, "type": "int", "count": 1, "start": null, "value": 128, "values": []}], "data": [{"chunks": [{"name": "quiz_validation_00000of00004.tfrecord", "version": 1}, {"name": "quiz_validation_00001of00004.tfrecord", "version": 1}, {"name": "quiz_validation_00002of00004.tfrecord", "version": 1}, {"name": "quiz_validation_00003of00004.tfrecord", "version": 1}, {"name": "quiz_train_00003of00004.tfrecord", "version": 1}, {"name": "quiz_train_00002of00004.tfrecord", "version": 1}, {"name": "quiz_train_00001of00004.tfrecord", "version": 1}, {"name": "quiz_train_00000of00004.tfrecord", "version": 1}], "dataset": "ai100/quiz-w7", "dataset_id": 24}], "events": [{"ts": 1548055517, "name": "created"}, {"ts": 1548055517, "name": "starting"}, {"ts": 1548055611, "name": "building"}, {"ts": 1548055786, "name": "running"}, {"ts": 1548056486, "name": "saving"}, {"ts": 1548056488, "name": "killed"}], "charts": {"ckpt": {"audio": [], "graph": false, "images": [], "scalars": [], "tensors": [], "histograms": [], "meta_graph": false, "run_metadata": [], "distributions": []}}, "results": {}, "stdout": ".6/site-packages/tensorflow/contrib/layers/python/layers/layers.py\", line 1410, in dropout\n    outputs = layer.apply(inputs, training=is_training)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/layers/base.py\", line 671, in apply\n    return self.__call__(inputs, *args, **kwargs)\n  File 
\"/opt/conda/lib/python3.6/site-packages/tensorflow/python/layers/base.py\", line 575, in __call__\n    outputs = self.call(inputs, *args, **kwargs)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/layers/core.py\", line 300, in call\n    lambda: array_ops.identity(inputs))\n\nInvalidArgumentError (see above for traceback): Cannot assign a device for operation 'gradients/densenet/block_1_conv1x10_dropout/dropout/div_grad/BroadcastGradientArgs': Operation was explicitly assigned to /device:GPU:0 but available devices are [ /job:localhost/replica:0/task:0/device:CPU:0, /job:localhost/replica:0/task:0/device:XLA_CPU:0 ]. Make sure the device specification refers to a valid device.\n\t [[Node: gradients/densenet/block_1_conv1x10_dropout/dropout/div_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _device=\"/device:GPU:0\"](gradients/densenet/block_1_conv1x10_dropout/dropout/div_grad/Shape, gradients/densenet/block_1_conv1x10_dropout/dropout/div_grad/Shape_1)]]\n\n################    eval    ################\nWARNING:tensorflow:From ./eval_image_classifier.py:91: get_or_create_global_step (from tensorflow.contrib.framework.python.ops.variables) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease switch to tf.train.get_or_create_global_step\nWARNING:tensorflow:From ./eval_image_classifier.py:157: streaming_recall_at_k (from tensorflow.contrib.metrics.python.ops.metric_ops) is deprecated and will be removed after 2016-11-08.\nInstructions for updating:\nPlease use `streaming_sparse_recall_at_k`, and reshape labels from [batch_size] to [batch_size, 1].\nINFO:tensorflow:Evaluating None\nINFO:tensorflow:Starting evaluation at 2019-01-21-07:30:16\nINFO:tensorflow:Evaluation [1/128]\nINFO:tensorflow:Evaluation [2/128]\nINFO:tensorflow:Evaluation [3/128]\nINFO:tensorflow:Evaluation [4/128]\nINFO:tensorflow:Evaluation [5/128]\nINFO:tensorflow:Evaluation [6/128]\nINFO:tensorflow:Evaluation 
[7/128]\nINFO:tensorflow:Evaluation [8/128]\nINFO:tensorflow:Evaluation [9/128]\nINFO:tensorflow:Evaluation [10/128]\nINFO:tensorflow:Evaluation [11/128]\nINFO:tensorflow:Evaluation [12/128]\nINFO:tensorflow:Evaluation [13/128]\nINFO:tensorflow:Evaluation [14/128]\nINFO:tensorflow:Evaluation [15/128]\nINFO:tensorflow:Evaluation [16/128]\nINFO:tensorflow:Evaluation [17/128]\nINFO:tensorflow:Evaluation [18/128]\nINFO:tensorflow:Evaluation [19/128]\nINFO:tensorflow:Evaluation [20/128]\nINFO:tensorflow:Evaluation [21/128]\nINFO:tensorflow:Evaluation [22/128]\nINFO:tensorflow:Evaluation [23/128]\nINFO:tensorflow:Evaluation [24/128]\nINFO:tensorflow:Evaluation [25/128]\nINFO:tensorflow:Evaluation [26/128]\nINFO:tensorflow:Evaluation [27/128]\nINFO:tensorflow:Evaluation [28/128]\nINFO:tensorflow:Evaluation [29/128]\nINFO:tensorflow:Evaluation [30/128]\nINFO:tensorflow:Evaluation [31/128]\nINFO:tensorflow:Evaluation [32/128]\nINFO:tensorflow:Evaluation [33/128]\nINFO:tensorflow:Evaluation [34/128]\nINFO:tensorflow:Evaluation [35/128]\nINFO:tensorflow:Evaluation [36/128]\nINFO:tensorflow:Evaluation [37/128]\nINFO:tensorflow:Evaluation [38/128]\nINFO:tensorflow:Evaluation [39/128]\nINFO:tensorflow:Evaluation [40/128]\nINFO:tensorflow:Evaluation [41/128]\nINFO:tensorflow:Evaluation [42/128]\nINFO:tensorflow:Evaluation [43/128]\nINFO:tensorflow:Evaluation [44/128]\nINFO:tensorflow:Evaluation [45/128]\nINFO:tensorflow:Evaluation [46/128]\nINFO:tensorflow:Evaluation [47/128]\nINFO:tensorflow:Evaluation [48/128]\nINFO:tensorflow:Evaluation [49/128]\nINFO:tensorflow:Evaluation [50/128]\nINFO:tensorflow:Evaluation [51/128]\nINFO:tensorflow:Evaluation [52/128]\nINFO:tensorflow:Evaluation [53/128]\nINFO:tensorflow:Evaluation [54/128]\nINFO:tensorflow:Evaluation [55/128]\nINFO:tensorflow:Evaluation [56/128]\nINFO:tensorflow:Evaluation [57/128]\nINFO:tensorflow:Evaluation [58/128]\nINFO:tensorflow:Evaluation [59/128]\nINFO:tensorflow:Evaluation [60/128]\nINFO:tensorflow:Evaluation 
[61/128]\nINFO:tensorflow:Evaluation [62/128]\nINFO:tensorflow:Evaluation [63/128]\nINFO:tensorflow:Evaluation [64/128]\nINFO:tensorflow:Evaluation [65/128]\nINFO:tensorflow:Evaluation [66/128]\nINFO:tensorflow:Evaluation [67/128]\nINFO:tensorflow:Evaluation [68/128]\nINFO:tensorflow:Evaluation [69/128]\nINFO:tensorflow:Evaluation [70/128]\nINFO:tensorflow:Evaluation [71/128]\nINFO:tensorflow:Evaluation [72/128]\nINFO:tensorflow:Evaluation [73/128]\nINFO:tensorflow:Evaluation [74/128]\nINFO:tensorflow:Evaluation [75/128]\nINFO:tensorflow:Evaluation [76/128]\nINFO:tensorflow:Evaluation [77/128]\nINFO:tensorflow:Evaluation [78/128]\nINFO:tensorflow:Evaluation [79/128]\nINFO:tensorflow:Evaluation [80/128]\nINFO:tensorflow:Evaluation [81/128]\nINFO:tensorflow:Evaluation [82/128]\nINFO:tensorflow:Evaluation [83/128]\nINFO:tensorflow:Evaluation [84/128]\nINFO:tensorflow:Evaluation [85/128]\nINFO:tensorflow:Evaluation [86/128]\nINFO:tensorflow:Evaluation [87/128]\nINFO:tensorflow:Evaluation [88/128]\nINFO:tensorflow:Evaluation [89/128]\nINFO:tensorflow:Evaluation [90/128]\nINFO:tensorflow:Evaluation [91/128]\nINFO:tensorflow:Evaluation [92/128]\nINFO:tensorflow:Evaluation [93/128]\nINFO:tensorflow:Evaluation [94/128]\nINFO:tensorflow:Evaluation [95/128]\nINFO:tensorflow:Evaluation [96/128]\nINFO:tensorflow:Evaluation [97/128]\nINFO:tensorflow:Evaluation [98/128]\nINFO:tensorflow:Evaluation [99/128]\nINFO:tensorflow:Evaluation [100/128]\nINFO:tensorflow:Evaluation [101/128]\nINFO:tensorflow:Evaluation [102/128]\nINFO:tensorflow:Evaluation [103/128]\nINFO:tensorflow:Evaluation [104/128]\nINFO:tensorflow:Evaluation [105/128]\nINFO:tensorflow:Evaluation [106/128]\nINFO:tensorflow:Evaluation [107/128]\nINFO:tensorflow:Evaluation [108/128]\nINFO:tensorflow:Evaluation [109/128]\nINFO:tensorflow:Evaluation [110/128]\nINFO:tensorflow:Evaluation [111/128]\nINFO:tensorflow:Evaluation [112/128]\nINFO:tensorflow:Evaluation [113/128]\nINFO:tensorflow:Evaluation 
[114/128]\nINFO:tensorflow:Evaluation [115/128]\nINFO:tensorflow:Evaluation [116/128]\nINFO:tensorflow:Evaluation [117/128]\nINFO:tensorflow:Evaluation [118/128]\nINFO:tensorflow:Evaluation [119/128]\nINFO:tensorflow:Evaluation [120/128]\nINFO:tensorflow:Evaluation [121/128]\nINFO:tensorflow:Evaluation [122/128]\nINFO:tensorflow:Evaluation [123/128]\nINFO:tensorflow:Evaluation [124/128]\nINFO:tensorflow:Evaluation [125/128]\nINFO:tensorflow:Evaluation [126/128]\nINFO:tensorflow:Evaluation [127/128]\nINFO:tensorflow:Evaluation [128/128]\n2019-01-21 07:41:18.511187: I tensorflow/core/kernels/logging_ops.cc:79] eval/Recall_5[0.0263671875]\n2019-01-21 07:41:18.511187: I tensorflow/core/kernels/logging_ops.cc:79] eval/Accuracy[0.00512695312]\nINFO:tensorflow:Finished evaluation at 2019-01-21-07:41:18\n################    train    ################\nWARNING:tensorflow:From ./train_image_classifier.py:400: create_global_step (from tensorflow.contrib.framework.python.ops.variables) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease switch to tf.train.create_global_step\nWARNING:tensorflow:From ./train_image_classifier.py:468: softmax_cross_entropy (from tensorflow.contrib.losses.python.losses.loss_ops) is deprecated and will be removed after 2016-12-30.\nInstructions for updating:\nUse tf.losses.softmax_cross_entropy instead. 
Note that the order of the logits and labels arguments has been changed.\nWARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/contrib/losses/python/losses/loss_ops.py:398: compute_weighted_loss (from tensorflow.contrib.losses.python.losses.loss_ops) is deprecated and will be removed after 2016-12-30.\nInstructions for updating:\nUse tf.losses.compute_weighted_loss instead.\nWARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/contrib/losses/python/losses/loss_ops.py:151: add_arg_scope.<locals>.func_with_args (from tensorflow.contrib.losses.python.losses.loss_ops) is deprecated and will be removed after 2016-12-30.\nInstructions for updating:\nUse tf.losses.add_loss instead.\n", "stderr": null, "created_at": 1548055517, "completed_at": 1548056488, "user": {"id": 1641, "username": "evolution23", "name": "evolution23", "locale": "zh_CN", "avatar_url": "https://danqing.github.io/tms/avatars/5@2x.png", "email_confirmed_at": true, "created_at": 1546420646}, "model": {"id": 3309, "user_id": 1641, "username": "evolution23", "collaborator_ids": null, "name": "w8-densenet", "summary": "w8-densenet", "description": "# \u7b80\u4ecb\nhttps://edu.csdn.net/topic/ai115\n\n# TinymMind\u4e0aGPU\u8fd0\u884c\u8d39\u7528\u8f83\u8d35\uff0c\u6bcf CPU \u6bcf\u5c0f\u65f6 $0.09\uff0c\u6bcf GPU \u6bcf\u5c0f\u65f6 $0.99\uff0c\u6240\u6709\u4f5c\u4e1a\u5185\u5bb9\u63a8\u8350\u5148\u5728\u672c\u5730\u8fd0\u884c\u51fa\u4e00\u5b9a\u7684\u7ed3\u679c\uff0c\u4fdd\u8bc1\u8fd0\u884c\u6b63\u786e\u4e4b\u540e\uff0c\u518d\u4e0a\u4f20\u5230TinyMind\u4e0a\u8fd0\u884c\u3002\u521d\u59cb\u8fd0\u884c\u63a8\u8350\u4f7f\u7528CPU\u8fd0\u884c\u8d44\u6e90\uff0c\u5f85\u6240\u6709\u4ee3\u7801\u786e\u4fdd\u6ca1\u6709\u95ee\u9898\u4e4b\u540e\uff0c\u518d\u542f\u52a8GPU\u8fd0\u884c\u3002\n\n\n\u5b66\u5458\u81ea\u5df1\u5b9e\u73b0\u4e00\u4e2adensenet\u7684\u7f51\u7edc\uff0c\u5e76\u63d2\u5165\u5230slim\u6846\u67b6\u4e2d\u8fdb\u884c\u8bad\u7ec3\u3002\n\ntinymind 
\u4f7f\u7528\u8bf4\u660e\uff1ahttps://gitee.com/ai100/quiz-w7-doc\n\n\n### \u6570\u636e\u96c6\n\u672c\u6570\u636e\u96c6\u62e5\u6709200\u4e2a\u5206\u7c7b\uff0c\u6bcf\u4e2a\u5206\u7c7b300\u5f20\u56fe\u7247\uff0c\u5171\u8ba16W\u5f20\u56fe\u7247\uff0c\u5176\u4e2d5W\u5f20\u4f5c\u4e3a\u8bad\u7ec3\u96c6\uff0c1W\u5f20\u56fe\u7247\u4f5c\u4e3a\u9a8c\u8bc1\u96c6\u3002\u56fe\u7247\u5df2\u7ecf\u9884\u6253\u5305\u4e3atfrecord\u683c\u5f0f\u5e76\u4e0a\u4f20\u5230tinymind\u4e0a\u3002\u5730\u5740\u5982\u4e0b\uff1a\nhttps://www.tinymind.com/ai100/datasets/quiz-w7\n\n### \u6a21\u578b\n\u6a21\u578b\u4ee3\u7801\u6765\u81ea\uff1a\nhttps://github.com/tensorflow/models/tree/master/research/slim\n\n\n\u8fd9\u91cc\u4e3a\u4e86\u9002\u5e94\u672c\u4f5c\u4e1a\u63d0\u4f9b\u7684\u6570\u636e\u96c6\uff0c\u7a0d\u4f5c\u4fee\u6539\uff0c\u6dfb\u52a0\u4e86\u4e00\u4e2aquiz\u6570\u636e\u96c6\u4ee5\u53ca\u4e00\u4e2a\u8bad\u7ec3\u5e76\u9a8c\u8bc1\u7684\u811a\u672c\uff0c\u5b9e\u9645\u4f7f\u7528\u7684\u4ee3\u7801\u4e3a\uff1a\nhttps://gitee.com/ai100/quiz-w7-2-densenet\n\n\n\u5176\u4e2dnets\u76ee\u5f55\u4e0b\u7684densenet.py\u4e2d\u5df2\u7ecf\u5b9a\u4e49\u4e86densenet\u7f51\u7edc\u7684\u5165\u53e3\u51fd\u6570\u7b49\uff0c\u76f8\u5e94\u7684\u8f85\u52a9\u4ee3\u7801\u4e5f\u90fd\u5df2\u7ecf\u5b8c\u6210\uff0c\u5b66\u5458\u53ea\u9700\u8981check\u6216\u8005fork\u8fd9\u91cc\u7684\u4ee3\u7801\uff0c\u6dfb\u52a0\u81ea\u5df1\u7684densenet\u5b9e\u73b0\u5e76\u5728tinymind\u4e0a\u5efa\u7acb\u76f8\u5e94\u7684\u6a21\u578b\u5373\u53ef\u3002\n\n\ndensenet\u8bba\u6587\u53c2\u8003 https://arxiv.org/abs/1608.06993\n\n\n\u5728tinymind\u4e0a\u65b0\u5efa\u4e00\u4e2a\u6a21\u578b\uff0c\u6a21\u578b\u8bbe\u7f6e\u53c2\u8003\u5982\u4e0b\u6a21\u578b\uff1a\n\nhttps://www.tinymind.com/ai100/quiz-w7-2-densenet-model\n\u590d\u5236\u6a21\u578b\u540e\u53ef\u4ee5\u770b\u5230\u6a21\u578b\u7684\u5168\u90e8\u53c2\u6570\u3002\n\n\u6a21\u578b\u53c2\u6570\u7684\u89e3\u91ca\uff1a\n\n- dataset_name quiz  # 
\u6570\u636e\u96c6\u7684\u540d\u79f0\uff0c\u8fd9\u91cc\u4f7f\u7528\u6211\u4eec\u4e3a\u672c\u6b21\u4f5c\u4e1a\u4e13\u95e8\u505a\u7684quiz\u6570\u636e\u96c6\n- dataset_dir /data/ai100/quiz-w7  # tfrecord\u5b58\u653e\u7684\u76ee\u5f55\uff0c\u8fd9\u4e2a\u76ee\u5f55\u662f\u5efa\u7acb\u6a21\u578b\u7684\u65f6\u5019\uff0c\u7531tinymind\u63d0\u4f9b\u7684\n- model_name densenet  # \u4f7f\u7528\u7684\u7f51\u7edc\u7684\u540d\u79f0\uff0c\u672c\u4f5c\u4e1a\u56fa\u5b9a\u4e3adensenet\n- train_dir /output/ckpt  # \u8bad\u7ec3\u76ee\u5f55\uff0c\u8bad\u7ec3\u7684\u4e2d\u95f4\u6587\u4ef6\u548csummary\uff0ccheckpoint\u7b49\u90fd\u5b58\u653e\u5728\u8fd9\u91cc\uff0c\u8fd9\u4e2a\u76ee\u5f55\u4e5f\u662f\u9a8c\u8bc1\u8fc7\u7a0b\u7684checkpoint_path\u53c2\u6570\uff0c \u8fd9\u4e2a\u76ee\u5f55\u7531tinymind\u63d0\u4f9b\uff0c\u9700\u8981\u6ce8\u610f\u8fd9\u4e2a\u76ee\u5f55\u662f\u9700\u8981\u5199\u5165\u7684\uff0c\u4f7f\u7528\u5176\u4ed6\u76ee\u5f55\u53ef\u80fd\u4f1a\u51fa\u73b0\u5199\u5165\u5931\u8d25\u7684\u60c5\u51b5\u3002\n- learning_rate 0.1  # \u5b66\u4e60\u7387, \u56e0\u4e3a\u6ca1\u6709\u9884\u8bad\u7ec3\u6a21\u578b\uff0c\u8fd9\u91cc\u4f7f\u7528\u8f83\u5927\u7684\u5b66\u4e60\u7387\u4ee5\u52a0\u5feb\u6536\u655b\u901f\u5ea6\u3002\n- optimizer rmsprop  # \u4f18\u5316\u5668\uff0c\u5173\u4e8e\u4f18\u5316\u5668\u7684\u533a\u522b\u8bf7\u53c2\u8003[\u8fd9\u91cc](https://arxiv.org/abs/1609.04747)\n- dataset_split_name validation # \u6570\u636e\u96c6\u5206\u5757\u540d\uff0c\u7528\u4e8e\u9a8c\u8bc1\u8fc7\u7a0b\uff0c\u4f20\u5165train\u53ef\u9a8c\u8bc1train\u96c6\u51c6\u786e\u5ea6\uff0c\u4f20\u5165validation\u53ef\u9a8c\u8bc1validation\u96c6\u51c6\u786e\u5ea6\uff0c\u8fd9\u91cc\u53ea\u5173\u6ce8validation\n- eval_dir /output/eval  # \u9a8c\u8bc1\u76ee\u5f55\uff0c\u9a8c\u8bc1\u7ed3\u679c\uff0c\u5305\u62ecsummary\u7b49\uff0c\u4f1a\u5199\u5165\u8fd9\u4e2a\u76ee\u5f55\n- max_num_batches 128  # 
\u9a8c\u8bc1batches\uff0c\u8fd9\u91cc\u4f1a\u9a8c\u8bc1128\u00d732\u51714096\u4e2a\u56fe\u7247\u6837\u672c\u7684\u6570\u636e\u3002\n\n\n\u9f13\u52b1\u53c2\u4e0e\u8bfe\u7a0b\u7684\u5b66\u5458\u5c1d\u8bd5\u4e0d\u540c\u7684\u53c2\u6570\u7ec4\u5408\u4ee5\u4f53\u9a8c\u4e0d\u540c\u7684\u53c2\u6570\u5bf9\u8bad\u7ec3\u51c6\u786e\u7387\u548c\u6536\u655b\u901f\u5ea6\u7684\u5f71\u54cd\u3002\n\n### \u7ed3\u679c\u8bc4\u4f30\n\u5b66\u5458\u9700\u8981\u63d0\u4f9b\u8fd0\u884clog\u7684\u622a\u56fe\u548c\u6587\u6863\u63cf\u8ff0\n\n\u5728tinymind\u8fd0\u884clog\u7684\u8f93\u51fa\u4e2d\uff0c\u53ef\u4ee5\u770b\u5230\u5982\u4e0b\u5185\u5bb9\uff1a\n```sh\n2017-12-1 23:03:04.327009: I tensorflow/core/kernels/logging_ops.cc:79] eval/Accuracy[0.252197266]\n2017-12-1 23:03:04.327097: I tensorflow/core/kernels/logging_ops.cc:79] eval/Recall_5[0.494873047]\n```\ndensenet\u7684\u7f51\u7edc\uff0c\u6548\u679c\u8981\u7565\u597d\u4e8einceptionv4\u3002\u8003\u8651\u5230\u5b9e\u73b0\u7684\u4e0d\u540c\uff0c\u800c\u4e14\u6ca1\u6709\u9884\u8bad\u7ec3\u6a21\u578b\uff0c\u8fd9\u91cc\u4e0d\u5bf9\u51c6\u786e\u7387\u505a\u8981\u6c42\u3002\u53ea\u8981\u8bad\u7ec3\u8fd0\u884c\u6210\u529f\u5e76\u6709\u51c6\u786e\u7387\u8f93\u51fa\u5373\u53ef\u8ba4\u4e3a\u53ca\u683c60\u5206\u3002\n\n\u63d0\u4f9b\u5bf9densenet\u5b9e\u73b0\u8fc7\u7a0b\u7684\u63cf\u8ff0\uff1a\n\u5bf9growth\u7684\u7406\u89e3 20\u5206\n\u5bf9\u7a20\u5bc6\u94fe\u63a5\u7684\u7406\u89e3 20\u5206\n\n\n# \u53c2\u8003\u5185\u5bb9\n>epoch\u8ba1\u7b97\u65b9\u5f0f\uff1a\n>epoch = step * batch_size / count_all_train_pics\n\n\n\u672c\u5730\u8fd0\u884cslim\u6846\u67b6\u6240\u7528\u547d\u4ee4\u884c\uff1a\n\n\u4f7f\u7528\u9884\u8bad\u7ec3\u6a21\u578b\u8fdb\u884cinceptionv4\u7b49\u7684finetune\n```sh\n\u8bad\u7ec3\uff1a\npython3 train_image_classifier.py --dataset_name=quiz --dataset_dir=/path/to/data --checkpoint_path=/path/to/inception_v4.ckpt --model_name=inception_v4 --checkpoint_exclude_scopes=InceptionV4/Logits,InceptionV4/AuxLogits/Aux_logits 
--train_dir=/path/to/train_ckpt --learning_rate=0.001 --optimizer=rmsprop  --batch_size=32\n\ntrain\u96c6\u9a8c\u8bc1\uff1a\npython3 eval_image_classifier.py --dataset_name=quiz --dataset_dir=/path/to/data --dataset_split_name=train --model_name=inception_v4 --checkpoint_path=/path/to/train_ckpt --eval_dir=/path/to/train_eval --batch_size=32 --max_num_batches=128\n\nvalidation\u96c6\u9a8c\u8bc1\uff1a\npython3 eval_image_classifier.py --dataset_name=quiz --dataset_dir=/path/to/data --dataset_split_name=validation --model_name=inception_v4 --checkpoint_path=/path/to/train_ckpt --eval_dir=/path/to/validation_eval --batch_size=32 --max_num_batches=128\n\n\u7edf\u4e00\u811a\u672c\uff1a\npython3 train_eval_image_classifier.py --dataset_name=quiz --dataset_dir=/path/to/data --checkpoint_path=/path/to/inception_v4.ckpt --model_name=inception_v4 --checkpoint_exclude_scopes=InceptionV4/Logits,InceptionV4/AuxLogits/Aux_logits --optimizer=rmsprop --train_dir=/path/to/log/train_ckpt --learning_rate=0.001 --dataset_split_name=validation --eval_dir=/path/to/eval --max_num_batches=128\n```\n\n\u4ece\u5934\u5f00\u59cb\u8bad\u7ec3densenet\n```sh\n\u8bad\u7ec3\npython3 train_image_classifier.py --dataset_name=quiz --dataset_dir=/path/to/data --model_name=densenet --train_dir=/path/to/train_ckpt_den --learning_rate=0.1 --optimizer=rmsprop  --batch_size=16/path/to\n\ntrain\u96c6\u9a8c\u8bc1\uff1a\npython3 eval_image_classifier.py --dataset_name=quiz --dataset_dir=/path/to/data --dataset_split_name=train --model_name=densenet --checkpoint_path=/path/to/train_ckpt_den --eval_dir=/path/to/train_eval_den --batch_size=32 --max_num_batches=128\n\nvalidation\u96c6\u9a8c\u8bc1\uff1a\npython3 eval_image_classifier.py --dataset_name=quiz --dataset_dir=/path/to/data --dataset_split_name=validation --model_name=densenet --checkpoint_path=/path/to/train_ckpt_den --eval_dir=/path/to/validation_eval_den --batch_size=32 --max_num_batches=128\n\n\u7edf\u4e00\u811a\u672c\uff1a\npython3 
train_eval_image_classifier.py --dataset_name=quiz --dataset_dir=/path/to/data --model_name=densenet --checkpoint_exclude_scopes=InceptionV4/Logits,InceptionV4/AuxLogits/Aux_logits --train_dir=/path/to/log/train_ckpt --learning_rate=0.1 --dataset_split_name=validation --eval_dir=/path/to/eval_den --max_num_batches=128\n```\n\n## cpu\u8bad\u7ec3\n\u672c\u5730\u6ca1\u6709\u663e\u5361\u7684\u60c5\u51b5\u4e0b\uff0c\u4f7f\u7528\u4e0a\u8ff0\u547d\u4ee4\u8fdb\u884c\u8bad\u7ec3\u4f1a\u5bfc\u81f4\u9519\u8bef\u3002\u53ea\u4f7f\u7528CPU\u8fdb\u884c\u8bad\u7ec3\u7684\u8bdd\uff0c\u9700\u8981\u5728\u8bad\u7ec3\u547d\u4ee4\u6216\u8005\u7edf\u4e00\u811a\u672c\u4e0a\u6dfb\u52a0**--clone_on_cpu=True**\u53c2\u6570\u3002tinymind\u4e0a\u5219\u9700\u8981\u65b0\u5efa\u4e00\u4e2a**clone_on_cpu**\u7684**bool**\u7c7b\u578b\u53c2\u6570\u5e76\u8bbe\u7f6e\u4e3a**True**\n\n# \u4ee5\u4e0b\u5185\u5bb9\u4e3aslim\u5b98\u65b9\u4ecb\u7ecd\n----\n# TensorFlow-Slim image classification model library\n\n[TF-slim](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim)\nis a new lightweight high-level API of TensorFlow (`tensorflow.contrib.slim`)\nfor defining, training and evaluating complex\nmodels. This directory contains\ncode for training and evaluating several widely used Convolutional Neural\nNetwork (CNN) image classification models using TF-slim.\nIt contains scripts that will allow\nyou to train models from scratch or fine-tune them from pre-trained network\nweights. It also contains code for downloading standard image datasets,\nconverting them\nto TensorFlow's native TFRecord format and reading them in using TF-Slim's\ndata reading and queueing utilities. You can easily train any model on any of\nthese datasets, as we demonstrate below. 
We've also included a\n[jupyter notebook](https://github.com/tensorflow/models/blob/master/research/slim/slim_walkthrough.ipynb),\nwhich provides working examples of how to use TF-Slim for image classification.\nFor developing or modifying your own models, see also the [main TF-Slim page](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim).\n\n## Contacts\n\nMaintainers of TF-slim:\n\n* Nathan Silberman,\n  github: [nathansilberman](https://github.com/nathansilberman)\n* Sergio Guadarrama, github: [sguada](https://github.com/sguada)\n\n## Table of contents\n\n<a href=\"#Install\">Installation and setup</a><br>\n<a href='#Data'>Preparing the datasets</a><br>\n<a href='#Pretrained'>Using pre-trained models</a><br>\n<a href='#Training'>Training from scratch</a><br>\n<a href='#Tuning'>Fine tuning to a new task</a><br>\n<a href='#Eval'>Evaluating performance</a><br>\n<a href='#Export'>Exporting Inference Graph</a><br>\n<a href='#Troubleshooting'>Troubleshooting</a><br>\n\n# Installation\n<a id='Install'></a>\n\nIn this section, we describe the steps required to install the appropriate\nprerequisite packages.\n\n## Installing latest version of TF-slim\n\nTF-Slim is available as `tf.contrib.slim` via TensorFlow 1.0. 
To test that your\ninstallation is working, execute the following command; it should run without\nraising any errors.\n\n```\npython -c \"import tensorflow.contrib.slim as slim; eval = slim.evaluation.evaluate_once\"\n```\n\n## Installing the TF-slim image models library\n\nTo use TF-Slim for image classification, you also have to install\nthe [TF-Slim image models library](https://github.com/tensorflow/models/tree/master/research/slim),\nwhich is not part of the core TF library.\nTo do this, check out the\n[tensorflow/models](https://github.com/tensorflow/models/) repository as follows:\n\n```bash\ncd $HOME/workspace\ngit clone https://github.com/tensorflow/models/\n```\n\nThis will put the TF-Slim image models library in `$HOME/workspace/models/research/slim`.\n(It will also create a directory called\n[models/inception](https://github.com/tensorflow/models/tree/master/research/inception),\nwhich contains an older version of slim; you can safely ignore this.)\n\nTo verify that this has worked, execute the following commands; it should run\nwithout raising any errors.\n\n```\ncd $HOME/workspace/models/research/slim\npython -c \"from nets import cifarnet; mynet = cifarnet.cifarnet\"\n```\n\n\n# Preparing the datasets\n<a id='Data'></a>\n\nAs part of this library, we've included scripts to download several popular\nimage datasets (listed below) and convert them to slim format.\n\nDataset | Training Set Size | Testing Set Size | Number of Classes | Comments\n:------:|:---------------:|:---------------------:|:-----------:|:-----------:\nFlowers|2500 | 2500 | 5 | Various sizes (source: Flickr)\n[Cifar10](https://www.cs.toronto.edu/~kriz/cifar.html) | 60k| 10k | 10 |32x32 color\n[MNIST](http://yann.lecun.com/exdb/mnist/)| 60k | 10k | 10 | 28x28 gray\n[ImageNet](http://www.image-net.org/challenges/LSVRC/2012/)|1.2M| 50k | 1000 | Various sizes\n\n## Downloading and converting to TFRecord format\n\nFor each dataset, we'll need to download the raw data and convert it 
to\nTensorFlow's native\n[TFRecord](https://www.tensorflow.org/versions/r0.10/api_docs/python/python_io.html#tfrecords-format-details)\nformat. Each TFRecord contains a\n[TF-Example](https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/core/example/example.proto)\nprotocol buffer. Below we demonstrate how to do this for the Flowers dataset.\n\n```shell\n$ DATA_DIR=/tmp/data/flowers\n$ python download_and_convert_data.py \\\n    --dataset_name=flowers \\\n    --dataset_dir=\"${DATA_DIR}\"\n```\n\nWhen the script finishes you will find several TFRecord files created:\n\n```shell\n$ ls ${DATA_DIR}\nflowers_train-00000-of-00005.tfrecord\n...\nflowers_train-00004-of-00005.tfrecord\nflowers_validation-00000-of-00005.tfrecord\n...\nflowers_validation-00004-of-00005.tfrecord\nlabels.txt\n```\n\nThese represent the training and validation data, sharded over 5 files each.\nYou will also find the `$DATA_DIR/labels.txt` file which contains the mapping\nfrom integer labels to class names.\n\nYou can use the same script to create the mnist and cifar10 datasets.\nHowever, for ImageNet, you have to follow the instructions\n[here](https://github.com/tensorflow/models/blob/master/research/inception/README.md#getting-started).\nNote that you first have to sign up for an account at image-net.org.\nAlso, the download can take several hours, and could use up to 500GB.\n\n\n## Creating a TF-Slim Dataset Descriptor.\n\nOnce the TFRecord files have been created, you can easily define a Slim\n[Dataset](https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/contrib/slim/python/slim/data/dataset.py),\nwhich stores pointers to the data file, as well as various other pieces of\nmetadata, such as the class labels, the train/test split, and how to parse the\nTFExample protos. 
We have included the TF-Slim Dataset descriptors\nfor\n[Cifar10](https://github.com/tensorflow/models/blob/master/research/slim/datasets/cifar10.py),\n[ImageNet](https://github.com/tensorflow/models/blob/master/research/slim/datasets/imagenet.py),\n[Flowers](https://github.com/tensorflow/models/blob/master/research/slim/datasets/flowers.py),\nand\n[MNIST](https://github.com/tensorflow/models/blob/master/research/slim/datasets/mnist.py).\nAn example of how to load data using a TF-Slim dataset descriptor using a\nTF-Slim\n[DatasetDataProvider](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py)\nis found below:\n\n```python\nimport tensorflow as tf\nfrom datasets import flowers\n\nslim = tf.contrib.slim\n\n# Selects the 'validation' dataset.\ndataset = flowers.get_split('validation', DATA_DIR)\n\n# Creates a TF-Slim DataProvider which reads the dataset in the background\n# during both training and testing.\nprovider = slim.dataset_data_provider.DatasetDataProvider(dataset)\n[image, label] = provider.get(['image', 'label'])\n```\n## An automated script for processing ImageNet data.\n\nTraining a model with the ImageNet dataset is a common request. To facilitate\nworking with the ImageNet dataset, we provide an automated script for\ndownloading and processing the ImageNet dataset into the native TFRecord\nformat.\n\nThe TFRecord format consists of a set of sharded files where each entry is a serialized `tf.Example` proto. Each `tf.Example` proto contains the ImageNet image (JPEG encoded) as well as metadata such as label and bounding box information.\n\nWe provide a single [script](datasets/download_and_preprocess_imagenet.sh) for\ndownloading and converting ImageNet data to TFRecord format. Downloading and\npreprocessing the data may take several hours (up to half a day) depending on\nyour network and computer speed. 
Please be patient.\n\nTo begin, you will need to sign up for an account with [ImageNet]\n(http://image-net.org) to gain access to the data. Look for the sign up page,\ncreate an account and request an access key to download the data.\n\nAfter you have `USERNAME` and `PASSWORD`, you are ready to run our script. Make\nsure that your hard disk has at least 500 GB of free space for downloading and\nstoring the data. Here we select `DATA_DIR=$HOME/imagenet-data` as such a\nlocation but feel free to edit accordingly.\n\nWhen you run the below script, please enter *USERNAME* and *PASSWORD* when\nprompted. This will occur at the very beginning. Once these values are entered,\nyou will not need to interact with the script again.\n\n```shell\n# location of where to place the ImageNet data\nDATA_DIR=$HOME/imagenet-data\n\n# build the preprocessing script.\nbazel build slim/download_and_preprocess_imagenet\n\n# run it\nbazel-bin/slim/download_and_preprocess_imagenet \"${DATA_DIR}\"\n```\n\nThe final line of the output script should read:\n\n```shell\n2016-02-17 14:30:17.287989: Finished writing all 1281167 images in data set.\n```\n\nWhen the script finishes you will find 1024 and 128 training and validation\nfiles in the `DATA_DIR`. The files will match the patterns `train-????-of-1024`\nand `validation-?????-of-00128`, respectively.\n\n[Congratulations!](https://www.youtube.com/watch?v=9bZkp7q19f0) You are now\nready to train or evaluate with the ImageNet data set.\n\n# Pre-trained Models\n<a id='Pretrained'></a>\n\nNeural nets work best when they have many parameters, making them powerful\nfunction approximators.\nHowever, this  means they must be trained on very large datasets. Because\ntraining models from scratch can be a very computationally intensive process\nrequiring days or even weeks, we provide various pre-trained models,\nas listed below. 
These CNNs have been trained on the\n[ILSVRC-2012-CLS](http://www.image-net.org/challenges/LSVRC/2012/)\nimage classification dataset.\n\nIn the table below, we list each model, the corresponding\nTensorFlow model file, the link to the model checkpoint, and the top 1 and top 5\naccuracy (on the imagenet test set).\nNote that the VGG and ResNet V1 parameters have been converted from their original\ncaffe formats\n([here](https://github.com/BVLC/caffe/wiki/Model-Zoo#models-used-by-the-vgg-team-in-ilsvrc-2014)\nand\n[here](https://github.com/KaimingHe/deep-residual-networks)),\nwhereas the Inception and ResNet V2 parameters have been trained internally at\nGoogle. Also be aware that these accuracies were computed by evaluating using a\nsingle image crop. Some academic papers report higher accuracy by using multiple\ncrops at multiple scales.\n\nModel | TF-Slim File | Checkpoint | Top-1 Accuracy| Top-5 Accuracy |\n:----:|:------------:|:----------:|:-------:|:--------:|\n[Inception V1](http://arxiv.org/abs/1409.4842v1)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v1.py)|[inception_v1_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz)|69.8|89.6|\n[Inception V2](http://arxiv.org/abs/1502.03167)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v2.py)|[inception_v2_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v2_2016_08_28.tar.gz)|73.9|91.8|\n[Inception V3](http://arxiv.org/abs/1512.00567)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v3.py)|[inception_v3_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz)|78.0|93.9|\n[Inception 
V4](http://arxiv.org/abs/1602.07261)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v4.py)|[inception_v4_2016_09_09.tar.gz](http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz)|80.2|95.2|\n[Inception-ResNet-v2](http://arxiv.org/abs/1602.07261)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_resnet_v2.py)|[inception_resnet_v2_2016_08_30.tar.gz](http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz)|80.4|95.3|\n[ResNet V1 50](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_50_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz)|75.2|92.2|\n[ResNet V1 101](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_101_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz)|76.4|92.9|\n[ResNet V1 152](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_152_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_152_2016_08_28.tar.gz)|76.8|93.2|\n[ResNet V2 50](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_50_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz)|75.6|92.8|\n[ResNet V2 101](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_101_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_101_2017_04_14.tar.gz)|77.0|93.7|\n[ResNet V2 
152](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_152_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_152_2017_04_14.tar.gz)|77.8|94.1|\n[ResNet V2 200](https://arxiv.org/abs/1603.05027)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[TBA]()|79.9\\*|95.2\\*|\n[VGG 16](http://arxiv.org/abs/1409.1556.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py)|[vgg_16_2016_08_28.tar.gz](http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz)|71.5|89.8|\n[VGG 19](http://arxiv.org/abs/1409.1556.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py)|[vgg_19_2016_08_28.tar.gz](http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz)|71.1|89.8|\n[MobileNet_v1_1.0_224](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_1.0_224_2017_06_14.tar.gz](http://download.tensorflow.org/models/mobilenet_v1_1.0_224_2017_06_14.tar.gz)|70.7|89.5|\n[MobileNet_v1_0.50_160](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_0.50_160_2017_06_14.tar.gz](http://download.tensorflow.org/models/mobilenet_v1_0.50_160_2017_06_14.tar.gz)|59.9|82.5|\n[MobileNet_v1_0.25_128](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_0.25_128_2017_06_14.tar.gz](http://download.tensorflow.org/models/mobilenet_v1_0.25_128_2017_06_14.tar.gz)|41.3|66.2|\n[NASNet-A_Mobile_224](https://arxiv.org/abs/1707.07012)#|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py)|[nasnet-a_mobile_04_10_2017.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_mobile_04_10
_2017.tar.gz)|74.0|91.6|\n[NASNet-A_Large_331](https://arxiv.org/abs/1707.07012)#|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py)|[nasnet-a_large_04_10_2017.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_large_04_10_2017.tar.gz)|82.7|96.2|\n\n^ ResNet V2 models use Inception pre-processing and input image size of 299 (use\n`--preprocessing_name inception --eval_image_size 299` when using\n`eval_image_classifier.py`). Performance numbers for ResNet V2 models are\nreported on the ImageNet validation set.\n\n(#) More information and details about the NASNet architectures are available at this [README](nets/nasnet/README.md)\n\nAll 16 MobileNet Models reported in the [MobileNet Paper](https://arxiv.org/abs/1704.04861) can be found [here](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet_v1.md).\n\n(\\*): Results quoted from the [paper](https://arxiv.org/abs/1603.05027).\n\nHere is an example of how to download the Inception V3 checkpoint:\n\n```shell\n$ CHECKPOINT_DIR=/tmp/checkpoints\n$ mkdir ${CHECKPOINT_DIR}\n$ wget http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz\n$ tar -xvf inception_v3_2016_08_28.tar.gz\n$ mv inception_v3.ckpt ${CHECKPOINT_DIR}\n$ rm inception_v3_2016_08_28.tar.gz\n```\n\n\n\n# Training a model from scratch.\n<a id='Training'></a>\n\nWe provide an easy way to train a model from scratch using any TF-Slim dataset.\nThe following example demonstrates how to train Inception V3 using the default\nparameters on the ImageNet dataset.\n\n```shell\nDATASET_DIR=/tmp/imagenet\nTRAIN_DIR=/tmp/train_logs\npython train_image_classifier.py \\\n    --train_dir=${TRAIN_DIR} \\\n    --dataset_name=imagenet \\\n    --dataset_split_name=train \\\n    --dataset_dir=${DATASET_DIR} \\\n    --model_name=inception_v3\n```\n\nThis process may take several days, depending on your hardware setup.\nFor convenience, we provide a way to train a model on 
multiple GPUs,\nand/or multiple CPUs, either synchrononously or asynchronously.\nSee [model_deploy](https://github.com/tensorflow/models/blob/master/research/slim/deployment/model_deploy.py)\nfor details.\n\n### TensorBoard\n\nTo visualize the losses and other metrics during training, you can use\n[TensorBoard](https://github.com/tensorflow/tensorboard)\nby running the command below.\n\n```shell\ntensorboard --logdir=${TRAIN_DIR}\n```\n\nOnce TensorBoard is running, navigate your web browser to http://localhost:6006.\n\n# Fine-tuning a model from an existing checkpoint\n<a id='Tuning'></a>\n\nRather than training from scratch, we'll often want to start from a pre-trained\nmodel and fine-tune it.\nTo indicate a checkpoint from which to fine-tune, we'll call training with\nthe `--checkpoint_path` flag and assign it an absolute path to a checkpoint\nfile.\n\nWhen fine-tuning a model, we need to be careful about restoring checkpoint\nweights. In particular, when we fine-tune a model on a new task with a different\nnumber of output labels, we wont be able restore the final logits (classifier)\nlayer. For this, we'll use the `--checkpoint_exclude_scopes` flag. This flag\nhinders certain variables from being loaded. When fine-tuning on a\nclassification task using a different number of classes than the trained model,\nthe new model will have a final 'logits' layer whose dimensions differ from the\npre-trained model. For example, if fine-tuning an ImageNet-trained model on\nFlowers, the pre-trained logits layer will have dimensions `[2048 x 1001]` but\nour new logits layer will have dimensions `[2048 x 5]`. Consequently, this\nflag indicates to TF-Slim to avoid loading these weights from the checkpoint.\n\nKeep in mind that warm-starting from a checkpoint affects the model's weights\nonly during the initialization of the model. Once a model has started training,\na new checkpoint will be created in `${TRAIN_DIR}`. 
If the fine-tuning\ntraining is stopped and restarted, this new checkpoint will be the one from\nwhich weights are restored and not the `${checkpoint_path}$`. Consequently,\nthe flags `--checkpoint_path` and `--checkpoint_exclude_scopes` are only used\nduring the `0-`th global step (model initialization). Typically for fine-tuning\none only want train a sub-set of layers, so the flag `--trainable_scopes` allows\nto specify which subsets of layers should trained, the rest would remain frozen.\n\nBelow we give an example of\n[fine-tuning inception-v3 on flowers](https://github.com/tensorflow/models/blob/master/research/slim/scripts/finetune_inception_v3_on_flowers.sh),\ninception_v3  was trained on ImageNet with 1000 class labels, but the flowers\ndataset only have 5 classes. Since the dataset is quite small we will only train\nthe new layers.\n\n\n```shell\n$ DATASET_DIR=/tmp/flowers\n$ TRAIN_DIR=/tmp/flowers-models/inception_v3\n$ CHECKPOINT_PATH=/tmp/my_checkpoints/inception_v3.ckpt\n$ python train_image_classifier.py \\\n    --train_dir=${TRAIN_DIR} \\\n    --dataset_dir=${DATASET_DIR} \\\n    --dataset_name=flowers \\\n    --dataset_split_name=train \\\n    --model_name=inception_v3 \\\n    --checkpoint_path=${CHECKPOINT_PATH} \\\n    --checkpoint_exclude_scopes=InceptionV3/Logits,InceptionV3/AuxLogits \\\n    --trainable_scopes=InceptionV3/Logits,InceptionV3/AuxLogits\n```\n\n\n\n# Evaluating performance of a model\n<a id='Eval'></a>\n\nTo evaluate the performance of a model (whether pretrained or your own),\nyou can use the eval_image_classifier.py script, as shown below.\n\nBelow we give an example of downloading the pretrained inception model and\nevaluating it on the imagenet dataset.\n\n```shell\nCHECKPOINT_FILE = ${CHECKPOINT_DIR}/inception_v3.ckpt  # Example\n$ python eval_image_classifier.py \\\n    --alsologtostderr \\\n    --checkpoint_path=${CHECKPOINT_FILE} \\\n    --dataset_dir=${DATASET_DIR} \\\n    --dataset_name=imagenet \\\n    
--dataset_split_name=validation \\\n    --model_name=inception_v3\n```\n\nSee the [evaluation module example](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim#evaluation-loop)\nfor an example of how to evaluate a model at multiple checkpoints during or after the training.\n\n# Exporting the Inference Graph\n<a id='Export'></a>\n\nSaves out a GraphDef containing the architecture of the model.\n\nTo use it with a model name defined by slim, run:\n\n```shell\n$ python export_inference_graph.py \\\n  --alsologtostderr \\\n  --model_name=inception_v3 \\\n  --output_file=/tmp/inception_v3_inf_graph.pb\n\n$ python export_inference_graph.py \\\n  --alsologtostderr \\\n  --model_name=mobilenet_v1 \\\n  --image_size=224 \\\n  --output_file=/tmp/mobilenet_v1_224.pb\n```\n\n## Freezing the exported Graph\nIf you then want to use the resulting model with your own or pretrained\ncheckpoints as part of a mobile model, you can run freeze_graph to get a graph\ndef with the variables inlined as constants using:\n\n```shell\nbazel build tensorflow/python/tools:freeze_graph\n\nbazel-bin/tensorflow/python/tools/freeze_graph \\\n  --input_graph=/tmp/inception_v3_inf_graph.pb \\\n  --input_checkpoint=/tmp/checkpoints/inception_v3.ckpt \\\n  --input_binary=true --output_graph=/tmp/frozen_inception_v3.pb \\\n  --output_node_names=InceptionV3/Predictions/Reshape_1\n```\n\nThe output node names will vary depending on the model, but you can inspect and\nestimate them using the summarize_graph tool:\n\n```shell\nbazel build tensorflow/tools/graph_transforms:summarize_graph\n\nbazel-bin/tensorflow/tools/graph_transforms/summarize_graph \\\n  --in_graph=/tmp/inception_v3_inf_graph.pb\n```\n\n## Run label image in C++\n\nTo run the resulting graph in C++, you can look at the label_image sample code:\n\n```shell\nbazel build tensorflow/examples/label_image:label_image\n\nbazel-bin/tensorflow/examples/label_image/label_image \\\n  --image=${HOME}/Pictures/flowers.jpg 
\\\n  --input_layer=input \\\n  --output_layer=InceptionV3/Predictions/Reshape_1 \\\n  --graph=/tmp/frozen_inception_v3.pb \\\n  --labels=/tmp/imagenet_slim_labels.txt \\\n  --input_mean=0 \\\n  --input_std=255\n```\n\n\n# Troubleshooting\n<a id='Troubleshooting'></a>\n\n#### The model runs out of CPU memory.\n\nSee\n[Model Runs out of CPU memory](https://github.com/tensorflow/models/tree/master/research/inception#the-model-runs-out-of-cpu-memory).\n\n#### The model runs out of GPU memory.\n\nSee\n[Adjusting Memory Demands](https://github.com/tensorflow/models/tree/master/research/inception#adjusting-memory-demands).\n\n#### The model training results in NaN's.\n\nSee\n[Model Resulting in NaNs](https://github.com/tensorflow/models/tree/master/research/inception#the-model-training-results-in-nans).\n\n#### The ResNet and VGG Models have 1000 classes but the ImageNet dataset has 1001\n\nThe ImageNet dataset provided has an empty background class which can be used\nto fine-tune the model to other tasks. If you try training or fine-tuning the\nVGG or ResNet models using the ImageNet dataset, you might encounter the\nfollowing error:\n\n```bash\nInvalidArgumentError: Assign requires shapes of both tensors to match. lhs shape= [1001] rhs shape= [1000]\n```\nThis is due to the fact that the VGG and ResNet V1 final layers have only 1000\noutputs rather than 1001.\n\nTo fix this issue, you can set the `--labels_offset=1` flag. This results in\nthe ImageNet labels being shifted down by one:\n\n\n#### I wish to train a model with a different image size.\n\nThe preprocessing functions all take `height` and `width` as parameters. 
You\ncan change the default values using the following snippet:\n\n```python\nimage_preprocessing_fn = preprocessing_factory.get_preprocessing(\n    preprocessing_name,\n    height=MY_NEW_HEIGHT,\n    width=MY_NEW_WIDTH,\n    is_training=True)\n```\n\n#### What hardware specification are these hyper-parameters targeted for?\n\nSee\n[Hardware Specifications](https://github.com/tensorflow/models/tree/master/research/inception#what-hardware-specification-are-these-hyper-parameters-targeted-for).\n", "env": {"deps": null, "language": {"name": "python", "version": "3.6"}, "framework": {"name": "tensorflow", "version": "1.4"}}, "code": {"mode": "github", "branch": {"sha": "cf00de3da5193592bcf5d9e8dc12116c943f521b", "url": "https://github.com/evolution232634/densenet/commit/cf00de3da5193592bcf5d9e8dc12116c943f521b", "name": "master", "message": "\u7b2c\u516b\u5468\u4f5c\u4e1a\u63d0\u4ea4"}, "repo_id": 166633597, "repo_name": "evolution232634/densenet", "entrypoint": "train_eval_image_classifier.py", "archive_url": null, "archive_state": null}, "params": [{"end": null, "name": "iterations", "step": null, "type": "int", "count": 0, "start": null, "value": null, "values": []}, {"end": null, "name": "learning_rate", "step": null, "type": "float", "count": 1, "start": null, "value": 0.1, "values": []}, {"end": null, "name": "batch_size", "step": null, "type": "int", "count": 1, "start": null, "value": 32, "values": []}, {"end": null, "name": "dropout", "step": null, "type": "float", "count": 0, "start": null, "value": null, "values": []}, {"end": null, "name": "decay", "step": null, "type": "float", "count": 0, "start": null, "value": null, "values": []}, {"end": null, "name": "output_dir", "step": null, "type": "string", "count": 1, "start": null, "value": "/output", "values": []}, {"end": null, "name": "dataset_name", "step": null, "type": "string", "count": 1, "start": null, "value": "quiz", "values": []}, {"end": null, "name": "dataset_dir", "step": null, "type": "string", 
"count": 1, "start": null, "value": "/data/ai100/quiz-w7", "values": []}, {"end": null, "name": "checkpoint_path", "step": null, "type": "string", "count": 1, "start": null, "value": " /output/ckpt", "values": []}, {"end": null, "name": "model_name", "step": null, "type": "string", "count": 1, "start": null, "value": "densenet", "values": []}, {"end": null, "name": "checkpoint_exclude_scopes", "step": null, "type": "string", "count": 1, "start": null, "value": "InceptionV4/AuxLogits/Aux_logits", "values": []}, {"end": null, "name": "train_dir", "step": null, "type": "string", "count": 1, "start": null, "value": "/output/ckpt", "values": []}, {"end": null, "name": "optimizer", "step": null, "type": "string", "count": 1, "start": null, "value": "rmsprop", "values": []}, {"end": null, "name": "dataset_split_name", "step": null, "type": "string", "count": 1, "start": null, "value": "validation", "values": []}, {"end": null, "name": "eval_dir", "step": null, "type": "string", "count": 1, "start": null, "value": "/output/eval", "values": []}, {"end": null, "name": "max_num_batches", "step": null, "type": "int", "count": 1, "start": null, "value": 128, "values": []}], "data": [{"chunks": [{"name": "quiz_validation_00000of00004.tfrecord", "version": 1}, {"name": "quiz_validation_00001of00004.tfrecord", "version": 1}, {"name": "quiz_validation_00002of00004.tfrecord", "version": 1}, {"name": "quiz_validation_00003of00004.tfrecord", "version": 1}, {"name": "quiz_train_00003of00004.tfrecord", "version": 1}, {"name": "quiz_train_00002of00004.tfrecord", "version": 1}, {"name": "quiz_train_00001of00004.tfrecord", "version": 1}, {"name": "quiz_train_00000of00004.tfrecord", "version": 1}], "dataset": "ai100/quiz-w7", "dataset_id": 24}], "private": false, "star_count": 0, "use_count": 0, "parent_id": null, "parent_name": null, "updated_at": 1548055517}, "access": 5}
</script>

<script src="./w8_densenet_files/execution-ae555a0021.js.下载"></script>
  
</body></html>