<html><!-- Created using the cpp_pretty_printer from the dlib C++ library.  See http://dlib.net for updates. --><head><title>dlib C++ Library - dnn_mmod_train_find_cars_ex.cpp</title></head><body bgcolor='white'><pre>
<font color='#009900'>// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
</font><font color='#009900'>/*
    This example shows how to train a CNN based object detector using dlib's 
    loss_mmod loss layer.  This loss layer implements the Max-Margin Object
    Detection loss as described in the paper:
        Max-Margin Object Detection by Davis E. King (http://arxiv.org/abs/1502.00046).
    This is the same loss used by the popular SVM+HOG object detector in dlib
    (see <a href="fhog_object_detector_ex.cpp.html">fhog_object_detector_ex.cpp</a>) except here we replace the HOG features
    with a CNN and train the entire detector end-to-end.  This allows us to make
    much more powerful detectors.

    It would be a good idea to become familiar with dlib's DNN tooling before reading this
    example.  So you should read <a href="dnn_introduction_ex.cpp.html">dnn_introduction_ex.cpp</a> and <a href="dnn_introduction2_ex.cpp.html">dnn_introduction2_ex.cpp</a>
    before reading this example program.  You should also read the introductory DNN+MMOD
    example <a href="dnn_mmod_ex.cpp.html">dnn_mmod_ex.cpp</a> as well before proceeding.
    

    This example is essentially a more complex version of <a href="dnn_mmod_ex.cpp.html">dnn_mmod_ex.cpp</a>.  In it we train
    a detector that finds the rear ends of motor vehicles.  I will also discuss some
    aspects of data preparation useful when training this kind of detector.  
    
*/</font>


<font color='#0000FF'>#include</font> <font color='#5555FF'>&lt;</font>iostream<font color='#5555FF'>&gt;</font>
<font color='#0000FF'>#include</font> <font color='#5555FF'>&lt;</font>dlib<font color='#5555FF'>/</font>dnn.h<font color='#5555FF'>&gt;</font>
<font color='#0000FF'>#include</font> <font color='#5555FF'>&lt;</font>dlib<font color='#5555FF'>/</font>data_io.h<font color='#5555FF'>&gt;</font>

<font color='#0000FF'>using</font> <font color='#0000FF'>namespace</font> std;
<font color='#0000FF'>using</font> <font color='#0000FF'>namespace</font> dlib;



<font color='#0000FF'>template</font> <font color='#5555FF'>&lt;</font><font color='#0000FF'><u>long</u></font> num_filters, <font color='#0000FF'>typename</font> SUBNET<font color='#5555FF'>&gt;</font> <font color='#0000FF'>using</font> con5d <font color='#5555FF'>=</font> con<font color='#5555FF'>&lt;</font>num_filters,<font color='#979000'>5</font>,<font color='#979000'>5</font>,<font color='#979000'>2</font>,<font color='#979000'>2</font>,SUBNET<font color='#5555FF'>&gt;</font>;
<font color='#0000FF'>template</font> <font color='#5555FF'>&lt;</font><font color='#0000FF'><u>long</u></font> num_filters, <font color='#0000FF'>typename</font> SUBNET<font color='#5555FF'>&gt;</font> <font color='#0000FF'>using</font> con5  <font color='#5555FF'>=</font> con<font color='#5555FF'>&lt;</font>num_filters,<font color='#979000'>5</font>,<font color='#979000'>5</font>,<font color='#979000'>1</font>,<font color='#979000'>1</font>,SUBNET<font color='#5555FF'>&gt;</font>;
<font color='#0000FF'>template</font> <font color='#5555FF'>&lt;</font><font color='#0000FF'>typename</font> SUBNET<font color='#5555FF'>&gt;</font> <font color='#0000FF'>using</font> downsampler  <font color='#5555FF'>=</font> relu<font color='#5555FF'>&lt;</font>bn_con<font color='#5555FF'>&lt;</font>con5d<font color='#5555FF'>&lt;</font><font color='#979000'>32</font>, relu<font color='#5555FF'>&lt;</font>bn_con<font color='#5555FF'>&lt;</font>con5d<font color='#5555FF'>&lt;</font><font color='#979000'>32</font>, relu<font color='#5555FF'>&lt;</font>bn_con<font color='#5555FF'>&lt;</font>con5d<font color='#5555FF'>&lt;</font><font color='#979000'>16</font>,SUBNET<font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font>;
<font color='#0000FF'>template</font> <font color='#5555FF'>&lt;</font><font color='#0000FF'>typename</font> SUBNET<font color='#5555FF'>&gt;</font> <font color='#0000FF'>using</font> rcon5  <font color='#5555FF'>=</font> relu<font color='#5555FF'>&lt;</font>bn_con<font color='#5555FF'>&lt;</font>con5<font color='#5555FF'>&lt;</font><font color='#979000'>55</font>,SUBNET<font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font>;
<font color='#0000FF'>using</font> net_type <font color='#5555FF'>=</font> loss_mmod<font color='#5555FF'>&lt;</font>con<font color='#5555FF'>&lt;</font><font color='#979000'>1</font>,<font color='#979000'>9</font>,<font color='#979000'>9</font>,<font color='#979000'>1</font>,<font color='#979000'>1</font>,rcon5<font color='#5555FF'>&lt;</font>rcon5<font color='#5555FF'>&lt;</font>rcon5<font color='#5555FF'>&lt;</font>downsampler<font color='#5555FF'>&lt;</font>input_rgb_image_pyramid<font color='#5555FF'>&lt;</font>pyramid_down<font color='#5555FF'>&lt;</font><font color='#979000'>6</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font>;


<font color='#009900'>// ----------------------------------------------------------------------------------------
</font>
<font color='#0000FF'><u>int</u></font> <b><a name='ignore_overlapped_boxes'></a>ignore_overlapped_boxes</b><font face='Lucida Console'>(</font>
    std::vector<font color='#5555FF'>&lt;</font>mmod_rect<font color='#5555FF'>&gt;</font><font color='#5555FF'>&amp;</font> boxes,
    <font color='#0000FF'>const</font> test_box_overlap<font color='#5555FF'>&amp;</font> overlaps
<font face='Lucida Console'>)</font>
<font color='#009900'>/*!
    ensures
        - Whenever two rectangles in boxes overlap, according to overlaps(), we mark the
          smaller of the two boxes as ignore.
        - returns the number of newly ignored boxes.
!*/</font>
<b>{</b>
    <font color='#0000FF'><u>int</u></font> num_ignored <font color='#5555FF'>=</font> <font color='#979000'>0</font>;
    <font color='#009900'>// O(n^2) pairwise scan: compare every pair of boxes that are not already marked</font>
    <font color='#009900'>// ignore and, whenever a pair overlaps according to overlaps(), keep the larger</font>
    <font color='#009900'>// box and mark the smaller one as ignore.</font>
    <font color='#0000FF'>for</font> <font face='Lucida Console'>(</font><font color='#0000FF'><u>size_t</u></font> i <font color='#5555FF'>=</font> <font color='#979000'>0</font>; i <font color='#5555FF'>&lt;</font> boxes.<font color='#BB00BB'>size</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>; <font color='#5555FF'>+</font><font color='#5555FF'>+</font>i<font face='Lucida Console'>)</font>
    <b>{</b>
        <font color='#009900'>// Boxes already flagged ignore (either on input or by an earlier iteration)</font>
        <font color='#009900'>// are skipped as the first element of a pair.</font>
        <font color='#0000FF'>if</font> <font face='Lucida Console'>(</font>boxes[i].ignore<font face='Lucida Console'>)</font>
            <font color='#0000FF'>continue</font>;
        <font color='#0000FF'>for</font> <font face='Lucida Console'>(</font><font color='#0000FF'><u>size_t</u></font> j <font color='#5555FF'>=</font> i<font color='#5555FF'>+</font><font color='#979000'>1</font>; j <font color='#5555FF'>&lt;</font> boxes.<font color='#BB00BB'>size</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>; <font color='#5555FF'>+</font><font color='#5555FF'>+</font>j<font face='Lucida Console'>)</font>
        <b>{</b>
            <font color='#0000FF'>if</font> <font face='Lucida Console'>(</font>boxes[j].ignore<font face='Lucida Console'>)</font>
                <font color='#0000FF'>continue</font>;
            <font color='#0000FF'>if</font> <font face='Lucida Console'>(</font><font color='#BB00BB'>overlaps</font><font face='Lucida Console'>(</font>boxes[i], boxes[j]<font face='Lucida Console'>)</font><font face='Lucida Console'>)</font>
            <b>{</b>
                <font color='#5555FF'>+</font><font color='#5555FF'>+</font>num_ignored;
                <font color='#009900'>// Ignore the smaller of the two boxes.  On an exact area tie the</font>
                <font color='#009900'>// else branch runs, so the earlier box (boxes[i]) is the one kept.</font>
                <font color='#009900'>// Note: if boxes[i] gets marked ignore here, the inner loop still</font>
                <font color='#009900'>// continues comparing later boxes against it, so one large cluster</font>
                <font color='#009900'>// of mutually overlapping boxes can have several members ignored.</font>
                <font color='#0000FF'>if</font><font face='Lucida Console'>(</font>boxes[i].rect.<font color='#BB00BB'>area</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font> boxes[j].rect.<font color='#BB00BB'>area</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font><font face='Lucida Console'>)</font>
                    boxes[i].ignore <font color='#5555FF'>=</font> <font color='#979000'>true</font>;
                <font color='#0000FF'>else</font>
                    boxes[j].ignore <font color='#5555FF'>=</font> <font color='#979000'>true</font>;
            <b>}</b>
        <b>}</b>
    <b>}</b>
    <font color='#009900'>// Number of boxes whose ignore flag was newly set by this call.</font>
    <font color='#0000FF'>return</font> num_ignored;
<b>}</b>

<font color='#009900'>// ----------------------------------------------------------------------------------------
</font>
<font color='#0000FF'><u>int</u></font> <b><a name='main'></a>main</b><font face='Lucida Console'>(</font><font color='#0000FF'><u>int</u></font> argc, <font color='#0000FF'><u>char</u></font><font color='#5555FF'>*</font><font color='#5555FF'>*</font> argv<font face='Lucida Console'>)</font> <font color='#0000FF'>try</font>
<b>{</b>
    <font color='#0000FF'>if</font> <font face='Lucida Console'>(</font>argc <font color='#5555FF'>!</font><font color='#5555FF'>=</font> <font color='#979000'>2</font><font face='Lucida Console'>)</font>
    <b>{</b>
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>Give the path to a folder containing training.xml and testing.xml files.</font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>This example program is specifically designed to run on the dlib vehicle </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>detection dataset, which is available at this URL: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>   http://dlib.net/files/data/dlib_rear_end_vehicles_v1.tar</font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>So download that dataset, extract it somewhere, and then run this program</font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>with the dlib_rear_end_vehicles folder as an argument.  E.g. if you extract</font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>the dataset to the current folder then you should run this example program</font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>by typing: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>   ./dnn_mmod_train_find_cars_ex dlib_rear_end_vehicles</font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>It takes about a day to finish if run on a high end GPU like a 1080ti.</font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
        <font color='#0000FF'>return</font> <font color='#979000'>0</font>;
    <b>}</b>
    <font color='#0000FF'>const</font> std::string data_directory <font color='#5555FF'>=</font> argv[<font color='#979000'>1</font>];


    std::vector<font color='#5555FF'>&lt;</font>matrix<font color='#5555FF'>&lt;</font>rgb_pixel<font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font> images_train, images_test;
    std::vector<font color='#5555FF'>&lt;</font>std::vector<font color='#5555FF'>&lt;</font>mmod_rect<font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font> boxes_train, boxes_test;
    <font color='#BB00BB'>load_image_dataset</font><font face='Lucida Console'>(</font>images_train, boxes_train, data_directory<font color='#5555FF'>+</font>"<font color='#CC0000'>/training.xml</font>"<font face='Lucida Console'>)</font>;
    <font color='#BB00BB'>load_image_dataset</font><font face='Lucida Console'>(</font>images_test,  boxes_test,  data_directory<font color='#5555FF'>+</font>"<font color='#CC0000'>/testing.xml</font>"<font face='Lucida Console'>)</font>;

    <font color='#009900'>// When I was creating the dlib vehicle detection dataset I had to label all the cars
</font>    <font color='#009900'>// in each image.  MMOD requires all cars to be labeled, since any unlabeled part of an
</font>    <font color='#009900'>// image is implicitly assumed to be not a car, and the algorithm will use it as
</font>    <font color='#009900'>// negative training data.  So every car must be labeled, either with a normal
</font>    <font color='#009900'>// rectangle or an "ignore" rectangle that tells MMOD to simply ignore it (i.e. neither
</font>    <font color='#009900'>// treat it as a thing to detect nor as negative training data).  
</font>    <font color='#009900'>// 
</font>    <font color='#009900'>// In our present case, many images contain very tiny cars in the distance, ones that
</font>    <font color='#009900'>// are essentially just dark smudges.  It's not reasonable to expect the CNN
</font>    <font color='#009900'>// architecture we defined to detect such vehicles.  However, I erred on the side of
</font>    <font color='#009900'>// having more complete annotations when creating the dataset.  So when I labeled these
</font>    <font color='#009900'>// images I labeled many of these really difficult cases as vehicles to detect.   
</font>    <font color='#009900'>//
</font>    <font color='#009900'>// So the first thing we are going to do is clean up our dataset a little bit.  In
</font>    <font color='#009900'>// particular, we are going to mark boxes smaller than 35*35 pixels as ignore since
</font>    <font color='#009900'>// only really small and blurry cars appear at those sizes.  We will also mark boxes
</font>    <font color='#009900'>// that are heavily overlapped by another box as ignore.  We do this because we want to
</font>    <font color='#009900'>// allow for stronger non-maximum suppression logic in the learned detector, since that
</font>    <font color='#009900'>// will help make it easier to learn a good detector. 
</font>    <font color='#009900'>// 
</font>    <font color='#009900'>// To explain this non-max suppression idea further it's important to understand how
</font>    <font color='#009900'>// the detector works.  Essentially, sliding window detectors scan all image locations
</font>    <font color='#009900'>// and ask "is there a car here?".  If there really is a car in a specific location in
</font>    <font color='#009900'>// an image then usually many slightly different sliding window locations will produce
</font>    <font color='#009900'>// high detection scores, indicating that there is a car at those locations.  If we
</font>    <font color='#009900'>// just stopped there then each car would produce multiple detections.  But that isn't
</font>    <font color='#009900'>// what we want.  We want each car to produce just one detection.  So it's common for
</font>    <font color='#009900'>// detectors to include "non-maximum suppression" logic which simply takes the
</font>    <font color='#009900'>// strongest detection and then deletes all detections "close to" the strongest.  This
</font>    <font color='#009900'>// is a simple post-processing step that can eliminate duplicate detections.  However,
</font>    <font color='#009900'>// we have to define what "close to" means.  We can do this by looking at the training
</font>    <font color='#009900'>// data and checking how close the closest target boxes are to each other, and then
</font>    <font color='#009900'>// picking a "close to" measure that doesn't suppress those target boxes but is
</font>    <font color='#009900'>// otherwise as tight as possible.  This is exactly what the mmod_options object does
</font>    <font color='#009900'>// by default.
</font>    <font color='#009900'>//
</font>    <font color='#009900'>// Importantly, this means that if your training dataset contains an image with two
</font>    <font color='#009900'>// target boxes that really overlap a whole lot, then the non-maximum suppression
</font>    <font color='#009900'>// "close to" measure will be configured to allow detections to really overlap a whole
</font>    <font color='#009900'>// lot.  On the other hand, if your dataset didn't contain any overlapped boxes at all,
</font>    <font color='#009900'>// then the non-max suppression logic would be configured to filter out any boxes that
</font>    <font color='#009900'>// overlapped at all, and thus would be performing a much stronger non-max suppression.  
</font>    <font color='#009900'>//
</font>    <font color='#009900'>// Why does this matter?  Well, remember that we want to avoid duplicate detections.
</font>    <font color='#009900'>// If non-max suppression just kills everything in a really wide area around a car then
</font>    <font color='#009900'>// the CNN doesn't really need to learn anything about avoiding duplicate detections.
</font>    <font color='#009900'>// However, if non-max suppression only suppresses a tiny area around each detection
</font>    <font color='#009900'>// then the CNN will need to learn to output small detection scores for those areas of
</font>    <font color='#009900'>// the image not suppressed.  The smaller the non-max suppression region the more the
</font>    <font color='#009900'>// CNN has to learn and the more difficult the learning problem will become.  This is
</font>    <font color='#009900'>// why we remove highly overlapped objects from the training dataset.  That is, we do
</font>    <font color='#009900'>// it so the non-max suppression logic will be able to be reasonably effective.  Here
</font>    <font color='#009900'>// we are ensuring that any boxes that are entirely contained by another are
</font>    <font color='#009900'>// suppressed.  We also ensure that boxes with an intersection over union of 0.5 or
</font>    <font color='#009900'>// greater are suppressed.  This will improve the resulting detector since it will be
</font>    <font color='#009900'>// able to use more aggressive non-max suppression settings.
</font>
    <font color='#0000FF'><u>int</u></font> num_overlapped_ignored_test <font color='#5555FF'>=</font> <font color='#979000'>0</font>;
    <font color='#0000FF'>for</font> <font face='Lucida Console'>(</font><font color='#0000FF'>auto</font><font color='#5555FF'>&amp;</font> v : boxes_test<font face='Lucida Console'>)</font>
        num_overlapped_ignored_test <font color='#5555FF'>+</font><font color='#5555FF'>=</font> <font color='#BB00BB'>ignore_overlapped_boxes</font><font face='Lucida Console'>(</font>v, <font color='#BB00BB'>test_box_overlap</font><font face='Lucida Console'>(</font><font color='#979000'>0.50</font>, <font color='#979000'>0.95</font><font face='Lucida Console'>)</font><font face='Lucida Console'>)</font>;

    <font color='#0000FF'><u>int</u></font> num_overlapped_ignored <font color='#5555FF'>=</font> <font color='#979000'>0</font>;
    <font color='#0000FF'><u>int</u></font> num_additional_ignored <font color='#5555FF'>=</font> <font color='#979000'>0</font>;
    <font color='#0000FF'>for</font> <font face='Lucida Console'>(</font><font color='#0000FF'>auto</font><font color='#5555FF'>&amp;</font> v : boxes_train<font face='Lucida Console'>)</font>
    <b>{</b>
        num_overlapped_ignored <font color='#5555FF'>+</font><font color='#5555FF'>=</font> <font color='#BB00BB'>ignore_overlapped_boxes</font><font face='Lucida Console'>(</font>v, <font color='#BB00BB'>test_box_overlap</font><font face='Lucida Console'>(</font><font color='#979000'>0.50</font>, <font color='#979000'>0.95</font><font face='Lucida Console'>)</font><font face='Lucida Console'>)</font>;
        <font color='#0000FF'>for</font> <font face='Lucida Console'>(</font><font color='#0000FF'>auto</font><font color='#5555FF'>&amp;</font> bb : v<font face='Lucida Console'>)</font>
        <b>{</b>
            <font color='#0000FF'>if</font> <font face='Lucida Console'>(</font>bb.rect.<font color='#BB00BB'>width</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font> <font color='#979000'>35</font> <font color='#5555FF'>&amp;</font><font color='#5555FF'>&amp;</font> bb.rect.<font color='#BB00BB'>height</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font> <font color='#979000'>35</font><font face='Lucida Console'>)</font>
            <b>{</b>
                <font color='#0000FF'>if</font> <font face='Lucida Console'>(</font><font color='#5555FF'>!</font>bb.ignore<font face='Lucida Console'>)</font>
                <b>{</b>
                    bb.ignore <font color='#5555FF'>=</font> <font color='#979000'>true</font>;
                    <font color='#5555FF'>+</font><font color='#5555FF'>+</font>num_additional_ignored;
                <b>}</b>
            <b>}</b>

            <font color='#009900'>// The dlib vehicle detection dataset doesn't contain any detections with
</font>            <font color='#009900'>// really extreme aspect ratios.  However, some datasets do, often because of
</font>            <font color='#009900'>// bad labeling.  So it's a good idea to check for that and either eliminate
</font>            <font color='#009900'>// those boxes or set them to ignore.  Although, this depends on your
</font>            <font color='#009900'>// application.  
</font>            <font color='#009900'>// 
</font>            <font color='#009900'>// For instance, if your dataset has boxes with an aspect ratio
</font>            <font color='#009900'>// of 10 then you should think about what that means for the network
</font>            <font color='#009900'>// architecture.  Does the receptive field even cover the entirety of the box
</font>            <font color='#009900'>// in those cases?  Do you care about these boxes?  Are they labeling errors?
</font>            <font color='#009900'>// I find that many people will download some dataset from the internet and
</font>            <font color='#009900'>// just take it as given.  They run it through some training algorithm and take
</font>            <font color='#009900'>// the dataset as unchallengeable truth.  But many datasets are full of
</font>            <font color='#009900'>// labeling errors.  There are also a lot of datasets that aren't full of
</font>            <font color='#009900'>// errors, but are annotated in a sloppy and inconsistent way.  Fixing those
</font>            <font color='#009900'>// errors and inconsistencies can often greatly improve models trained from
</font>            <font color='#009900'>// such data.  It's almost always worth the time to try and improve your
</font>            <font color='#009900'>// training dataset.   
</font>            <font color='#009900'>//
</font>            <font color='#009900'>// In any case, my point is that there are other types of dataset cleaning you
</font>            <font color='#009900'>// could put here.  What exactly you need depends on your application.  But you
</font>            <font color='#009900'>// should carefully consider it and not take your dataset as a given.  The work
</font>            <font color='#009900'>// of creating a good detector is largely about creating a high quality
</font>            <font color='#009900'>// training dataset.  
</font>        <b>}</b>
    <b>}</b>

    <font color='#009900'>// When modifying a dataset like this, it's a really good idea to print a log of how
</font>    <font color='#009900'>// many boxes you ignored.  It's easy to accidentally ignore a huge block of data, so
</font>    <font color='#009900'>// you should always look and see that things are doing what you expect.
</font>    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>num_overlapped_ignored: </font>"<font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> num_overlapped_ignored <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>num_additional_ignored: </font>"<font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> num_additional_ignored <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>num_overlapped_ignored_test: </font>"<font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> num_overlapped_ignored_test <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;


    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>num training images: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> images_train.<font color='#BB00BB'>size</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>num testing images: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> images_test.<font color='#BB00BB'>size</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;


    <font color='#009900'>// Our vehicle detection dataset has basically 3 different types of boxes.  Square
</font>    <font color='#009900'>// boxes, tall and skinny boxes (e.g. semi trucks), and short and wide boxes (e.g.
</font>    <font color='#009900'>// sedans).  Here we are telling the MMOD algorithm that a vehicle is recognizable as
</font>    <font color='#009900'>// long as the longest box side is at least 70 pixels long and the shortest box side is
</font>    <font color='#009900'>// at least 30 pixels long.  mmod_options will use these parameters to decide how large
</font>    <font color='#009900'>// each of the sliding windows needs to be so as to be able to detect all the vehicles.
</font>    <font color='#009900'>// Since our dataset has basically these 3 different aspect ratios, it will decide to
</font>    <font color='#009900'>// use 3 different sliding windows.  This means the final con layer in the network will
</font>    <font color='#009900'>// have 3 filters, one for each of these aspect ratios. 
</font>    <font color='#009900'>//
</font>    <font color='#009900'>// Another thing to consider when setting the sliding window size is the "stride" of
</font>    <font color='#009900'>// your network.  The network we defined above downsamples the image by a factor of 8x
</font>    <font color='#009900'>// in the first few layers.  So when the sliding windows are scanning the image, they
</font>    <font color='#009900'>// are stepping over it with a stride of 8 pixels.  If you set the sliding window size
</font>    <font color='#009900'>// too small then the stride will become an issue.  For instance, if you set the
</font>    <font color='#009900'>// sliding window size to 4 pixels, then it means a 4x4 window will be moved by 8
</font>    <font color='#009900'>// pixels at a time when scanning. This is obviously a problem since 75% of the image
</font>    <font color='#009900'>// won't even be visited by the sliding window.  So you need to set the window size to
</font>    <font color='#009900'>// be big enough relative to the stride of your network.  In our case, the windows are
</font>    <font color='#009900'>// at least 30 pixels in length, so being moved by 8 pixel steps is fine. 
</font>    mmod_options <font color='#BB00BB'>options</font><font face='Lucida Console'>(</font>boxes_train, <font color='#979000'>70</font>, <font color='#979000'>30</font><font face='Lucida Console'>)</font>;


    <font color='#009900'>// This setting is very important and dataset specific.  The vehicle detection dataset
</font>    <font color='#009900'>// contains boxes that are marked as "ignore", as we discussed above.  Some of them are
</font>    <font color='#009900'>// ignored because we set ignore to true in the above code.  However, the xml files
</font>    <font color='#009900'>// also contained a lot of ignore boxes.  Some of them are large boxes that encompass
</font>    <font color='#009900'>// large parts of an image and the intention is to have everything inside those boxes
</font>    <font color='#009900'>// be ignored.  Therefore, we need to tell the MMOD algorithm to do that, which we do
</font>    <font color='#009900'>// by setting options.overlaps_ignore appropriately.  
</font>    <font color='#009900'>// 
</font>    <font color='#009900'>// But first, we need to understand exactly what this option does.  The MMOD loss
</font>    <font color='#009900'>// is essentially counting the number of false alarms + missed detections produced by
</font>    <font color='#009900'>// the detector for each image.  During training, the code is running the detector on
</font>    <font color='#009900'>// each image in a mini-batch and looking at its output and counting the number of
</font>    <font color='#009900'>// mistakes.  The optimizer tries to find parameters settings that minimize the number
</font>    <font color='#009900'>// of detector mistakes.
</font>    <font color='#009900'>// 
</font>    <font color='#009900'>// This overlaps_ignore option allows you to tell the loss that some outputs from the
</font>    <font color='#009900'>// detector should be totally ignored, as if they never happened.  In particular, if a
</font>    <font color='#009900'>// detection overlaps a box in the training data with ignore==true then that detection
</font>    <font color='#009900'>// is ignored.  This overlap is determined by calling
</font>    <font color='#009900'>// options.overlaps_ignore(the_detection, the_ignored_training_box).  If it returns
</font>    <font color='#009900'>// true then that detection is ignored.
</font>    <font color='#009900'>// 
</font>    <font color='#009900'>// You should read the documentation for test_box_overlap, the class type for
</font>    <font color='#009900'>// overlaps_ignore for full details.  However, the gist is that the default behavior is
</font>    <font color='#009900'>// to only consider boxes as overlapping if their intersection over union is &gt; 0.5.
</font>    <font color='#009900'>// However, the dlib vehicle detection dataset contains large boxes that are meant to
</font>    <font color='#009900'>// mask out large areas of an image.  So intersection over union isn't an appropriate
</font>    <font color='#009900'>// way to measure "overlaps with box" in this case.  We want any box that is contained
</font>    <font color='#009900'>// inside one of these big regions to be ignored, even if the detection box is really
</font>    <font color='#009900'>// small.  So we set overlaps_ignore to behave that way with this line.
</font>    options.overlaps_ignore <font color='#5555FF'>=</font> <font color='#BB00BB'>test_box_overlap</font><font face='Lucida Console'>(</font><font color='#979000'>0.5</font>, <font color='#979000'>0.95</font><font face='Lucida Console'>)</font>;

    net_type <font color='#BB00BB'>net</font><font face='Lucida Console'>(</font>options<font face='Lucida Console'>)</font>;

    <font color='#009900'>// The final layer of the network must be a con layer that contains 
</font>    <font color='#009900'>// options.detector_windows.size() filters.  This is because these final filters are
</font>    <font color='#009900'>// what perform the final "sliding window" detection in the network.  For the dlib
</font>    <font color='#009900'>// vehicle dataset, there will be 3 sliding window detectors, so we will be setting
</font>    <font color='#009900'>// num_filters to 3 here.
</font>    net.<font color='#BB00BB'>subnet</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>.<font color='#BB00BB'>layer_details</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>.<font color='#BB00BB'>set_num_filters</font><font face='Lucida Console'>(</font>options.detector_windows.<font color='#BB00BB'>size</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font><font face='Lucida Console'>)</font>;


    dnn_trainer<font color='#5555FF'>&lt;</font>net_type<font color='#5555FF'>&gt;</font> <font color='#BB00BB'>trainer</font><font face='Lucida Console'>(</font>net,<font color='#BB00BB'>sgd</font><font face='Lucida Console'>(</font><font color='#979000'>0.0001</font>,<font color='#979000'>0.9</font><font face='Lucida Console'>)</font><font face='Lucida Console'>)</font>;
    trainer.<font color='#BB00BB'>set_learning_rate</font><font face='Lucida Console'>(</font><font color='#979000'>0.1</font><font face='Lucida Console'>)</font>;
    trainer.<font color='#BB00BB'>be_verbose</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>;


    <font color='#009900'>// While training, we are going to use early stopping.  That is, we will be checking
</font>    <font color='#009900'>// how good the detector is performing on our test data and when it stops getting
</font>    <font color='#009900'>// better on the test data we will drop the learning rate.  We will keep doing that
</font>    <font color='#009900'>// until the learning rate is less than 1e-4.   These two settings tell the trainer to
</font>    <font color='#009900'>// do that.  Essentially, we are setting the first argument to infinity, and only the
</font>    <font color='#009900'>// test iterations without progress threshold will matter.  In particular, it says that
</font>    <font color='#009900'>// once we observe 1000 testing mini-batches where the test loss clearly isn't
</font>    <font color='#009900'>// decreasing we will lower the learning rate.
</font>    trainer.<font color='#BB00BB'>set_iterations_without_progress_threshold</font><font face='Lucida Console'>(</font><font color='#979000'>50000</font><font face='Lucida Console'>)</font>;
    trainer.<font color='#BB00BB'>set_test_iterations_without_progress_threshold</font><font face='Lucida Console'>(</font><font color='#979000'>1000</font><font face='Lucida Console'>)</font>;

    <font color='#0000FF'>const</font> string sync_filename <font color='#5555FF'>=</font> "<font color='#CC0000'>mmod_cars_sync</font>";
    trainer.<font color='#BB00BB'>set_synchronization_file</font><font face='Lucida Console'>(</font>sync_filename, std::chrono::<font color='#BB00BB'>minutes</font><font face='Lucida Console'>(</font><font color='#979000'>5</font><font face='Lucida Console'>)</font><font face='Lucida Console'>)</font>;




    std::vector<font color='#5555FF'>&lt;</font>matrix<font color='#5555FF'>&lt;</font>rgb_pixel<font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font> mini_batch_samples;
    std::vector<font color='#5555FF'>&lt;</font>std::vector<font color='#5555FF'>&lt;</font>mmod_rect<font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font> mini_batch_labels; 
    random_cropper cropper;
    cropper.<font color='#BB00BB'>set_seed</font><font face='Lucida Console'>(</font><font color='#BB00BB'>time</font><font face='Lucida Console'>(</font><font color='#979000'>0</font><font face='Lucida Console'>)</font><font face='Lucida Console'>)</font>;
    cropper.<font color='#BB00BB'>set_chip_dims</font><font face='Lucida Console'>(</font><font color='#979000'>350</font>, <font color='#979000'>350</font><font face='Lucida Console'>)</font>;
    <font color='#009900'>// Usually you want to give the cropper whatever min sizes you passed to the
</font>    <font color='#009900'>// mmod_options constructor, or very slightly smaller sizes, which is what we do here.
</font>    cropper.<font color='#BB00BB'>set_min_object_size</font><font face='Lucida Console'>(</font><font color='#979000'>69</font>,<font color='#979000'>28</font><font face='Lucida Console'>)</font>; 
    cropper.<font color='#BB00BB'>set_max_rotation_degrees</font><font face='Lucida Console'>(</font><font color='#979000'>2</font><font face='Lucida Console'>)</font>;
    dlib::rand rnd;

    <font color='#009900'>// Log the training parameters to the console
</font>    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> trainer <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> cropper <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;

    <font color='#0000FF'><u>int</u></font> cnt <font color='#5555FF'>=</font> <font color='#979000'>1</font>;
    <font color='#009900'>// Run the trainer until the learning rate gets small.  
</font>    <font color='#0000FF'>while</font><font face='Lucida Console'>(</font>trainer.<font color='#BB00BB'>get_learning_rate</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&gt;</font><font color='#5555FF'>=</font> <font color='#979000'>1e</font><font color='#5555FF'>-</font><font color='#979000'>4</font><font face='Lucida Console'>)</font>
    <b>{</b>
        <font color='#009900'>// Every 30 mini-batches we do a testing mini-batch.  
</font>        <font color='#0000FF'>if</font> <font face='Lucida Console'>(</font>cnt<font color='#5555FF'>%</font><font color='#979000'>30</font> <font color='#5555FF'>!</font><font color='#5555FF'>=</font> <font color='#979000'>0</font> <font color='#5555FF'>|</font><font color='#5555FF'>|</font> images_test.<font color='#BB00BB'>size</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>=</font><font color='#5555FF'>=</font> <font color='#979000'>0</font><font face='Lucida Console'>)</font>
        <b>{</b>
            <font color='#BB00BB'>cropper</font><font face='Lucida Console'>(</font><font color='#979000'>87</font>, images_train, boxes_train, mini_batch_samples, mini_batch_labels<font face='Lucida Console'>)</font>;
            <font color='#009900'>// We can also randomly jitter the colors and that often helps a detector
</font>            <font color='#009900'>// generalize better to new images.
</font>            <font color='#0000FF'>for</font> <font face='Lucida Console'>(</font><font color='#0000FF'>auto</font><font color='#5555FF'>&amp;</font><font color='#5555FF'>&amp;</font> img : mini_batch_samples<font face='Lucida Console'>)</font>
                <font color='#BB00BB'>disturb_colors</font><font face='Lucida Console'>(</font>img, rnd<font face='Lucida Console'>)</font>;

            <font color='#009900'>// It's a good idea to, at least once, put code here that displays the images
</font>            <font color='#009900'>// and boxes the random cropper is generating.  You should look at them and
</font>            <font color='#009900'>// think about if the output makes sense for your problem.  Most of the time
</font>            <font color='#009900'>// it will be fine, but sometimes you will realize that the pattern of cropping
</font>            <font color='#009900'>// isn't really appropriate for your problem and you will need to make some
</font>            <font color='#009900'>// change to how the mini-batches are being generated.  Maybe you will tweak
</font>            <font color='#009900'>// some of the cropper's settings, or write your own entirely separate code to
</font>            <font color='#009900'>// create mini-batches.  But either way, if you don't look you will never know.
</font>            <font color='#009900'>// An easy way to do this is to create a dlib::image_window to display the
</font>            <font color='#009900'>// images and boxes.
</font>
            trainer.<font color='#BB00BB'>train_one_step</font><font face='Lucida Console'>(</font>mini_batch_samples, mini_batch_labels<font face='Lucida Console'>)</font>;
        <b>}</b>
        <font color='#0000FF'>else</font>
        <b>{</b>
            <font color='#BB00BB'>cropper</font><font face='Lucida Console'>(</font><font color='#979000'>87</font>, images_test, boxes_test, mini_batch_samples, mini_batch_labels<font face='Lucida Console'>)</font>;
            <font color='#009900'>// We can also randomly jitter the colors and that often helps a detector
</font>            <font color='#009900'>// generalize better to new images.
</font>            <font color='#0000FF'>for</font> <font face='Lucida Console'>(</font><font color='#0000FF'>auto</font><font color='#5555FF'>&amp;</font><font color='#5555FF'>&amp;</font> img : mini_batch_samples<font face='Lucida Console'>)</font>
                <font color='#BB00BB'>disturb_colors</font><font face='Lucida Console'>(</font>img, rnd<font face='Lucida Console'>)</font>;

            trainer.<font color='#BB00BB'>test_one_step</font><font face='Lucida Console'>(</font>mini_batch_samples, mini_batch_labels<font face='Lucida Console'>)</font>;
        <b>}</b>
        <font color='#5555FF'>+</font><font color='#5555FF'>+</font>cnt;
    <b>}</b>
    <font color='#009900'>// wait for training threads to stop
</font>    trainer.<font color='#BB00BB'>get_net</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>done training</font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;

    <font color='#009900'>// Save the network to disk
</font>    net.<font color='#BB00BB'>clean</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>;
    <font color='#BB00BB'>serialize</font><font face='Lucida Console'>(</font>"<font color='#CC0000'>mmod_rear_end_vehicle_detector.dat</font>"<font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> net;


    <font color='#009900'>// It's a really good idea to print the training parameters.  This is because you will
</font>    <font color='#009900'>// invariably be running multiple rounds of training and should be logging the output
</font>    <font color='#009900'>// to a file.  This print statement will include many of the training parameters in
</font>    <font color='#009900'>// your log.
</font>    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> trainer <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> cropper <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;

    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>\nsync_filename: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> sync_filename <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>num training images: </font>"<font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> images_train.<font color='#BB00BB'>size</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>training results: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> <font color='#BB00BB'>test_object_detection_function</font><font face='Lucida Console'>(</font>net, images_train, boxes_train, <font color='#BB00BB'>test_box_overlap</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>, <font color='#979000'>0</font>, options.overlaps_ignore<font face='Lucida Console'>)</font>;
    <font color='#009900'>// Upsampling the data will allow the detector to find smaller cars.  Recall that 
</font>    <font color='#009900'>// we configured it to use a sliding window nominally 70 pixels in size.  So upsampling
</font>    <font color='#009900'>// here will let it find things nominally 35 pixels in size.  Although we include a
</font>    <font color='#009900'>// limit of 1800*1800 here which means "don't upsample an image if it's already larger
</font>    <font color='#009900'>// than 1800*1800".  We do this so we don't run out of RAM, which is a concern because
</font>    <font color='#009900'>// some of the images in the dlib vehicle dataset are really high resolution.
</font>    upsample_image_dataset<font color='#5555FF'>&lt;</font>pyramid_down<font color='#5555FF'>&lt;</font><font color='#979000'>2</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font face='Lucida Console'>(</font>images_train, boxes_train, <font color='#979000'>1800</font><font color='#5555FF'>*</font><font color='#979000'>1800</font><font face='Lucida Console'>)</font>;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>training upsampled results: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> <font color='#BB00BB'>test_object_detection_function</font><font face='Lucida Console'>(</font>net, images_train, boxes_train, <font color='#BB00BB'>test_box_overlap</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>, <font color='#979000'>0</font>, options.overlaps_ignore<font face='Lucida Console'>)</font>;


    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>num testing images: </font>"<font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> images_test.<font color='#BB00BB'>size</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>testing results: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> <font color='#BB00BB'>test_object_detection_function</font><font face='Lucida Console'>(</font>net, images_test, boxes_test, <font color='#BB00BB'>test_box_overlap</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>, <font color='#979000'>0</font>, options.overlaps_ignore<font face='Lucida Console'>)</font>;
    upsample_image_dataset<font color='#5555FF'>&lt;</font>pyramid_down<font color='#5555FF'>&lt;</font><font color='#979000'>2</font><font color='#5555FF'>&gt;</font><font color='#5555FF'>&gt;</font><font face='Lucida Console'>(</font>images_test, boxes_test, <font color='#979000'>1800</font><font color='#5555FF'>*</font><font color='#979000'>1800</font><font face='Lucida Console'>)</font>;
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> "<font color='#CC0000'>testing upsampled results: </font>" <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> <font color='#BB00BB'>test_object_detection_function</font><font face='Lucida Console'>(</font>net, images_test, boxes_test, <font color='#BB00BB'>test_box_overlap</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font>, <font color='#979000'>0</font>, options.overlaps_ignore<font face='Lucida Console'>)</font>;

    <font color='#009900'>/*
        This program takes many hours to execute on a high end GPU.  It took about a day to
        train on a NVIDIA 1080ti.  The resulting model file is available at
            http://dlib.net/files/mmod_rear_end_vehicle_detector.dat.bz2
        It should be noted that this file on dlib.net has a dlib::shape_predictor appended
        onto the end of it (see <a href="dnn_mmod_find_cars_ex.cpp.html">dnn_mmod_find_cars_ex.cpp</a> for an example of its use).  This
        explains why the model file on dlib.net is larger than the
        mmod_rear_end_vehicle_detector.dat output by this program.

        You can see some videos of this vehicle detector running on YouTube:
            https://www.youtube.com/watch?v=4B3bzmxMAZU
            https://www.youtube.com/watch?v=bP2SUo5vSlc

        Also, the training and testing accuracies were:
            num training images: 2217
            training results: 0.990738 0.736431 0.736073 
            training upsampled results: 0.986837 0.937694 0.936912 
            num testing images: 135
            testing results: 0.988827 0.471372 0.470806 
            testing upsampled results: 0.987879 0.651132 0.650399 
    */</font>

    <font color='#0000FF'>return</font> <font color='#979000'>0</font>;

<b>}</b>
<font color='#0000FF'>catch</font><font face='Lucida Console'>(</font>std::exception<font color='#5555FF'>&amp;</font> e<font face='Lucida Console'>)</font>
<b>{</b>
    cout <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> e.<font color='#BB00BB'>what</font><font face='Lucida Console'>(</font><font face='Lucida Console'>)</font> <font color='#5555FF'>&lt;</font><font color='#5555FF'>&lt;</font> endl;
<b>}</b>





</pre></body></html>