<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="utf-8">
    <meta name="description" content="Panoptic Lifting for 3D Scene Understanding with Neural Fields">
    <meta name="keywords" content="Panoptic Lifting, Segmentation, NeRF, Panoptic, Semantic, Instance">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>Panoptic Lifting</title>

    <!-- Web fonts. (No analytics script is loaded on this page.) -->
    <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro" rel="stylesheet">

    <link rel="stylesheet" href="./static/css/bulma.min.css">
    <link rel="stylesheet" href="./static/css/bulma-carousel.min.css">
    <link rel="stylesheet" href="./static/css/bulma-slider.min.css">
    <link rel="stylesheet" href="./static/css/fontawesome.all.min.css">
    <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
    <link rel="stylesheet" href="./static/css/index.css">

    <style>
        /* Wrappers for the interactive mesh renders; the canvas divs inside are absolutely positioned. */
        .render_wrapper {
            position: relative;
            height: 180px;
        }
        .render_wrapper_small {
            position: relative;
        }
        .render_div {
            position: absolute;
            top: 0;
            left: 0;
        }
        /* Vertical text label boxes flanking the mesh renders in the slider demo. */
        .roundbox {
            background-color: #a0a0b5;
            height: 555px;
            border-radius: 8px;
            writing-mode: vertical-rl;
            text-orientation: mixed;
            text-align: center;
            width: 100%;
            vertical-align: middle;
            font-weight: bold;
            font-size: large;
            color: white;
        }
        #interpolation-image-wrapper-car {
            text-align: center;
        }
        #interpolation-image-wrapper-chair {
            text-align: center;
        }
        .nested-columns {
            margin-bottom: 0 !important;
        }
        .interp_image_wrapper {
            width: 100%;
            justify-content: center;
            display: flex;
        }
        .glowing-border {
            border: 2px solid #dadada;
            border-radius: 7px;
        }
        /* Default outline is replaced with an equally visible glow, keeping keyboard focus perceivable. */
        .glowing-border:focus {
            outline: none;
            border-color: #1f191a;
            box-shadow: 0 0 10px #1f191a;
        }
    </style>

    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
    <script defer src="./static/js/fontawesome.all.min.js"></script>
    <script src="./static/js/bulma-carousel.min.js"></script>
    <script src="./static/js/bulma-slider.min.js"></script>
    <script src="./static/js/index.js"></script>
</head>

<body>

    <!-- Title, author list, affiliations, and external links (paper / arXiv / video / code). -->
    <section class="hero">
        <div class="hero-body">
            <div class="container is-max-desktop">
                <div class="columns is-centered">
                    <div class="column has-text-centered">
                        <h1 class="title is-1 publication-title">Panoptic Lifting for 3D Scene Understanding with Neural Fields</h1>
                        <h2 class="is-size-3">CVPR 2023 Highlight</h2>
                        <div class="is-size-5 publication-authors">
                            <span class="author-block">
                                <a href="https://niessnerlab.org/members/yawar_siddiqui/profile.html">Yawar Siddiqui</a><sup>1,2</sup>,</span>
                            <span class="author-block">
                                <a href="https://scholar.google.com/citations?user=vW1gaVEAAAAJ">Lorenzo Porzi</a><sup>2</sup>,
                            </span>
                            <span class="author-block">
                                <a href="https://scholar.google.com/citations?hl=de&amp;user=484sccEAAAAJ">Samuel Rota Bulò</a><sup>2</sup>,
                            </span>
                            <span class="author-block">
                                <a href="https://niessnerlab.org/members/norman_mueller/profile.html">Norman Müller</a><sup>1,2</sup>,
                            </span>
                            <span class="author-block">
                                <a href="https://niessnerlab.org/members/matthias_niessner/profile.html">Matthias Nießner</a><sup>1</sup>,
                            </span>
                            <span class="author-block">
                                <a href="https://www.3dunderstanding.org/team.html">Angela Dai</a><sup>1</sup>,
                            </span>
                            <span class="author-block">
                                <a href="https://scholar.google.com/citations?user=CxbDDRMAAAAJ&amp;hl=en">Peter Kontschieder</a><sup>2</sup>
                            </span>
                        </div>

                        <div class="is-size-5 publication-authors">
                            <span class="author-block"><sup>1</sup>Technical University of Munich,</span>
                            <span class="author-block"><sup>2</sup>Meta Reality Labs</span>
                        </div>
                        <div class="is-size-6 publication-authors">
                            (Work was done during Yawar’s and Norman’s internships at Meta Reality Labs Zurich as well as at TUM)
                        </div>

                        <div class="column has-text-centered">
                            <div class="publication-links">
                                <!-- PDF Link. -->
                                <span class="link-block">
                                    <a href="static/PanopticLifting.pdf"
                                        class="external-link button is-normal is-rounded is-dark">
                                        <span class="icon">
                                            <i class="fas fa-file-pdf"></i>
                                        </span>
                                        <span>Paper</span>
                                    </a>
                                </span>
                                <!-- arXiv Link. -->
                                <span class="link-block">
                                    <a class="external-link button is-normal is-rounded is-dark" href="https://arxiv.org/abs/2212.09802">
                                        <span class="icon">
                                            <i class="ai ai-arxiv"></i>
                                        </span>
                                        <span>arXiv</span>
                                    </a>
                                </span>
                                <!-- Video Link. -->
                                <span class="link-block">
                                    <a href="https://youtu.be/QtsiL-6rSuM"
                                        class="external-link button is-normal is-rounded is-dark">
                                        <span class="icon">
                                            <svg class="svg-inline--fa fa-youtube fa-w-18" aria-hidden="true"
                                                focusable="false" data-prefix="fab" data-icon="youtube" role="img"
                                                xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"
                                                data-fa-i2svg="">
                                                <path fill="currentColor"
                                                    d="M549.655 124.083c-6.281-23.65-24.787-42.276-48.284-48.597C458.781 64 288 64 288 64S117.22 64 74.629 75.486c-23.497 6.322-42.003 24.947-48.284 48.597-11.412 42.867-11.412 132.305-11.412 132.305s0 89.438 11.412 132.305c6.281 23.65 24.787 41.5 48.284 47.821C117.22 448 288 448 288 448s170.78 0 213.371-11.486c23.497-6.321 42.003-24.171 48.284-47.821 11.412-42.867 11.412-132.305 11.412-132.305s0-89.438-11.412-132.305zm-317.51 213.508V175.185l142.739 81.205-142.739 81.201z">
                                                </path>
                                            </svg><!-- <i class="fab fa-youtube"></i> Font Awesome fontawesome.com -->
                                        </span>
                                        <span>Video</span>
                                    </a>
                                </span>

                                <!-- Github Link. -->
                                <span class="link-block">
                                    <a href="https://github.com/nihalsid/panoptic-lifting" class="external-link button is-normal is-rounded is-dark">
                                        <span class="icon">
                                            <i class="fab fa-github"></i>
                                        </span>
                                        <span>Code</span>
                                    </a>
                                </span>
                                <!-- Dataset link intentionally omitted (no dataset release). -->
                            </div>
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </section>

    <!-- Teaser: input RGB stack -> method -> rendered panoptic radiance field video. Hidden on touch devices. -->
    <section class="hero teaser is-hidden-touch">
        <div class="container is-max-desktop">
            <div class="hero-body">
                <div class="columns">
                    <div class="column is-4" style="display: flex; justify-content: center; align-items: center; padding-top: 0; padding-bottom: 0">
                        <img src="./static/teaser/input_stack.jpg" alt="Stack of input RGB frames of an indoor scene" style="width: 100%; height: fit-content;">
                    </div>
                    <div class="column is-2" style="display: flex; justify-content: center; align-items: center; padding-right: 0">
                        <!-- Decorative arrow between input and output; empty alt keeps it out of the accessibility tree. -->
                        <img src="./static/teaser/arrow.jpg" alt="" style="width: 100%; height: fit-content;">
                    </div>
                    <div class="column is-6" style="height: 325px; padding: 0;">
                        <div style="display: flex; height: 325px; justify-content: center;">
                            <!-- Muted so autoplay is permitted by browsers. -->
                            <video autoplay controls muted loop style="border-color: #1c1c1c; border-style: solid;">
                                <source src="./static/teaser/teaser.mp4" type="video/mp4">
                            </video>
                        </div>
                    </div>
                </div>
                <h2 class=" subtitle has-text-centered" style="padding-top: 10px">
                    Given only RGB images of an in-the-wild scene as input, <span class="dnerf">Panoptic Lifting</span> optimizes a panoptic radiance field which can be queried for color, depth, semantics, and instances for any point in space. Our method lifts noisy and view-inconsistent machine generated 2D segmentation masks into a consistent 3D panoptic radiance field, without requiring further tracking supervision or 3D bounding boxes.
                </h2>
            </div>
        </div>
    </section>


    <!-- Interactive novel-view demo (hidden on touch devices).
         The element ids below (nearest_*, mesh_*, ours_*, interpolation-slider) are JS hooks —
         presumably driven by ./static/js/index.js, which swaps background-position offsets into
         sprite-sheet images as the slider moves; confirm against that script before renaming anything. -->
    <section class="hero is-light is-small is-hidden-touch">
        <div class="hero-body">
            <div class="container" id="office-results">
                <div class="columns is-vcentered">
                    <!-- Left column: nearest input RGB frame plus its machine-generated 2D semantic / instance labels. -->
                    <div class="column is-3">
                        <div class="interp_image_wrapper">
                            <div class="interp_image" id="nearest_color" style="height: 192px; width:256px; background-image: url('./static/slider/nearest.jpg');">
                            </div>
                        </div>
                        <div class="interp_image_wrapper" style="margin-top: 5px;">
                            <div class="interp_image" id="nearest_semantics" style="height: 192px; width:256px; background-image: url('./static/slider/nearest.jpg');">
                            </div>
                        </div>
                        <div class="interp_image_wrapper" style="margin-top: 5px;">
                            <div class="interp_image" id="nearest_instance" style="height: 192px; width:256px; background-image: url('./static/slider/nearest.jpg');">
                            </div>
                        </div>
                    </div>
                    <!-- Middle column: 3D mesh renders of the optimized panoptic field, flanked by vertical labels. -->
                    <div class="column columns nested-columns is-vcentered is-4">
                        <div class="column is-2">
                            <div class="item rounded roundbox">
                                <span style="right: 25%; position:relative;">Neural Field Optimization</span>
                            </div>
                        </div>
                        <div class="column is-8">
                            <div class="item item-steve render_wrapper" style="margin-bottom: 5px">
                                <div id="mesh_color" class="render_div"></div>
                            </div>
                            <div class="item item-steve render_wrapper" style="margin-bottom: 5px">
                                <div id="mesh_semantics" class="render_div"></div>
                            </div>
                            <div class="item item-steve render_wrapper">
                                <div id="mesh_instance" class="render_div"></div>
                            </div>
                        </div>
                        <div class="column is-2">
                            <div class="item roundbox">
                                <span style="right: 25%; position:relative;">Volumetric Rendering</span>
                            </div>
                        </div>

                    </div>
                    <!-- Right column: renders from our method. ours.jpg is a vertical sprite sheet;
                         each 192px background-position offset selects a row (color / depth / semantics / instance). -->
                    <div class="column is-5">
                        <div class="columns">
                            <div class="column interp_image_wrapper" style="padding: 0px 0px 0px 0px">
                                <div class="interp_image" id="ours_color" style="height: 192px; width:256px; background-image: url('./static/slider/ours.jpg');">
                                </div>
                            </div>
                            <div class="column interp_image_wrapper" style="padding: 0px 0px 0px 0px">
                                <div class="interp_image" id="ours_depth" style="height: 192px; width:256px; background-image: url('./static/slider/ours.jpg'); background-position: top -192px left 0px;">
                                </div>
                            </div>
                        </div>
                        <div class="columns" style="margin-top: 10px;">
                            <div class="column interp_image_wrapper" style="padding: 0px 0px 0px 0px">
                                <div class="interp_image" id="ours_semantics" style="height: 192px; width:256px; background-image: url('./static/slider/ours.jpg'); background-position: top -384px left 0px;">
                                </div>
                            </div>
                            <div class="column interp_image_wrapper" style="padding: 0px 0px 0px 0px">
                                <div class="interp_image" id="ours_instance" style="height: 192px; width:256px; background-image: url('./static/slider/ours.jpg'); background-position: top -576px left 0px;">
                                </div>
                            </div>
                        </div>
                    </div>
                </div>
                <!-- Captions for the three columns above. -->
                <div class="columns is-vcentered">
                    <div class="column is-3" style="text-align: center">
                        Nearest RGB frames and their machine generated 2D semantic and instance labels
                    </div>
                    <div class="column is-4" style="text-align: center">
                        Optimized panoptic field representation, i.e. color, semantics, and instances with the queried viewpoint (shown as a camera)
                    </div>
                    <div class="column is-5" style="text-align: center">
                        Rendered color, depth, semantics and instances from novel viewpoint
                    </div>
                </div>
            </div>
            <!-- Trajectory slider: 420 viewpoints (0-419); bulma-slider styles it, index.js reacts to it. -->
            <div class="container">
                <input class="slider is-fullwidth is-large is-info" id="interpolation-slider" step="1" min="0" max="419" value="253" type="range"/>
            </div>
            <div class="container">
                <div style="text-align: center;">Use the slider to move across a novel view trajectory along the scene. For a selected viewpoint along the trajectory, the nearest input frames and their machine generated panoptics are shown on the left. The optimized volumetric representation is shown as a mesh in the middle along with the viewpoint. Novel view panoptics obtained by our method are shown on the right. Press <b>R</b> to reset mesh view. </div>
            </div>
        </div>
    </section>


    <!-- Abstract and embedded paper video. -->
    <section class="section">
        <div class="container is-max-desktop">
            <!-- Abstract. -->
            <div class="columns is-centered has-text-centered">
                <div class="column is-four-fifths">
                    <h2 class="title is-3">Abstract</h2>
                    <div class="content has-text-justified">
                        <p>
                        We propose Panoptic Lifting, a novel approach for learning panoptic 3D volumetric representations from images of in-the-wild scenes.
                        Once trained, our model can render color images together with 3D-consistent panoptic segmentation from novel viewpoints.
                        </p>
                        <p>
                        Unlike existing approaches which use 3D input directly or indirectly, our method requires only machine-generated 2D panoptic segmentation masks inferred from a pre-trained network.
                        Our core contribution is a panoptic lifting scheme based on a neural field representation that generates a unified and multi-view consistent, 3D panoptic representation of the scene.
                        To account for inconsistencies of 2D instance identifiers across views, we solve a linear assignment with a cost based on the model's current predictions and the machine-generated segmentation masks, thus enabling us to lift 2D instances to 3D in a consistent way.
                        We further propose and ablate contributions that make our method more robust to noisy, machine-generated labels, including test-time augmentations for confidence estimates, segment consistency loss, bounded segmentation fields, and gradient stopping.
                        </p>
                        <p>
                            Experimental results validate our approach on the challenging Hypersim, Replica, and ScanNet datasets, improving by 8.4, 13.8, and 10.6% in scene-level PQ over state of the art.
                        </p>
                    </div>
                </div>
            </div>
            <!--/ Abstract. -->
            <!-- Paper video. -->
            <div class="columns is-centered has-text-centered">
                <div class="column is-four-fifths">
                    <h2 class="title is-3">Video</h2>
                    <div class="publication-video">
                        <!-- Canonical www embed host; the deprecated frameborder attribute is replaced by CSS. -->
                        <iframe src="https://www.youtube.com/embed/QtsiL-6rSuM" title="YouTube video player" style="border: 0;" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
                    </div>
                </div>
            </div>
            <!--/ Paper video. -->
        </div>
    </section>


    <section class="section">
        <div class="container is-max-desktop">

            <!-- Results carousel: three rendered result videos. h2 keeps the heading level consistent
                 with the sibling "Scene Editing" / "Effect of Components" section headings. -->
            <h2 class="title is-4">Results</h2>
            <div class="content has-text-justified">
                <section class="hero" style="overflow: hidden;">
                    <div id="carousel-demo" class="hero-carousel">
                        <div class="item-1">
                            <!-- height attribute is unitless pixels per the HTML spec (was "480px"). -->
                            <video autoplay controls muted loop height="480">
                                <source src="./static/video/render_result_0.mp4"
                                    type="video/mp4">
                            </video>
                        </div>
                        <div class="item-2">
                            <video autoplay controls muted loop height="480">
                                <source src="./static/video/render_result_1.mp4"
                                    type="video/mp4">
                            </video>
                        </div>
                        <div class="item-3">
                            <video autoplay controls muted loop height="480">
                                <source src="./static/video/render_result_2.mp4"
                                    type="video/mp4">
                            </video>
                        </div>
                    </div>
                    <div class="hero-head"></div>
                    <div class="hero-foot"></div>
                </section>
                <!-- End Hero Carousel -->

                <!-- NOTE(review): bulma-carousel is already loaded from ./static/js in <head>;
                     this CDN copy (4.0.3) is the one the attach() call below relies on — consider deduplicating. -->
                <script src="https://cdn.jsdelivr.net/npm/bulma-carousel@4.0.3/dist/js/bulma-carousel.min.js"></script>
                <script>
                    bulmaCarousel.attach('#carousel-demo', {
                        slidesToScroll: 1,
                        slidesToShow: 1
                    });
                </script>

            </div>

            <!-- Scene Editing: novel views with object instances deleted, duplicated, or manipulated. -->
            <div class="columns is-centered" style="margin-top: 25px">
                <div class="column is-full-width">
                    <h2 class="title is-4">Scene Editing</h2>
                    <div class="content has-text-justified">
                        <p>
                            Once trained, our method can generate novel views of a scene with object instances deleted, duplicated or manipulated under affine transformations.
                        </p>
                    </div>
                    <div class="columns is-centered">
                        <div class="column" style="background-color: #f5f5f5; margin-left: -5px;">
                            <div class="item item-steve render_wrapper_small">
                                <video autoplay controls muted loop>
                                    <source src="./static/editing/original.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item" style="text-align: center">
                                Optimized Scene
                            </div>
                        </div>
                        <div class="column" style="background-color: #f5f5f5; margin-left: 5px;">
                            <div class="item item-steve render_wrapper_small">
                                <video autoplay controls muted loop>
                                    <source src="./static/editing/deletion.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item" style="text-align: center">
                                Deletion
                            </div>
                        </div>
                        <div class="column" style="background-color: #f5f5f5; margin-left: 5px;">
                            <div class="item item-steve render_wrapper_small">
                                <video autoplay controls muted loop>
                                    <source src="./static/editing/duplication.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item" style="text-align: center">
                                Duplication
                            </div>
                        </div>
                        <div class="column" style="background-color: #f5f5f5; margin-left: 5px;">
                            <div class="item item-steve render_wrapper_small">
                                <video autoplay controls muted loop>
                                    <source src="./static/editing/manipulation.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item" style="text-align: center">
                                Manipulation
                            </div>
                        </div>
                    </div>
                </div>
            </div>
            <!--/ Scene Editing. -->

                <!-- Effect of Components. -->
            <!-- Effect of Components (ablations): the buttons toggle which ablation variant's
                 videos are shown via the full / noseg / noprob / noblock classes (handled in index.js). -->
            <div class="columns is-centered" style="margin-top: 25px">
                <div class="column is-full-width">
                    <h2 class="title is-4">Effect of Components</h2>
                    <div class="content has-text-justified">
                        <p>
                            We use a combination of ideas to impart robustness against noisy 2D machine generated labels.
                        </p>
                    </div>
                    <div class="columns is-centered">
                        <div class="column is-4">
                            <!-- type="button" prevents accidental form-submit semantics (the default type is submit). -->
                            <div class="buttons is-centered" style="height: 100%">
                              <button class="button glowing-border" id="button_0" type="button" style="background-color: #a0a0b5; color: #ffffff; width: 100%">Ours (complete)</button>
                              <button class="button glowing-border" id="button_1" type="button" style="background-color: #a0a0b5; color: #ffffff; width: 100%">Ours w/o Segment Consistency</button>
                              <button class="button glowing-border" id="button_2" type="button" style="background-color: #a0a0b5; color: #ffffff; width: 100%">Ours w/o Probability Field</button>
                              <button class="button glowing-border" id="button_3" type="button" style="background-color: #a0a0b5; color: #ffffff; width: 100%">Ours w/o Gradient Blocking</button>
                            </div>
                        </div>

                        <!-- RGB renders per ablation variant. -->
                        <div class="column" style="background-color: #f5f5f5; margin-left: -5px;">
                            <div class="item item-steve render_wrapper_small full">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/ours/rgb.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noseg">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noseg/rgb.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noprob">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noprob/rgb.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noblock">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noblock/rgb.mp4" type="video/mp4">
                                </video>
                            </div>
                        </div>
                        <!-- Depth renders per ablation variant. -->
                        <div class="column" style="background-color: #f5f5f5; margin-left: 5px;">
                            <div class="item item-steve render_wrapper_small full">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/ours/depth.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noseg">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noseg/depth.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noprob">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noprob/depth.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noblock">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noblock/depth.mp4" type="video/mp4">
                                </video>
                            </div>
                        </div>
                        <!-- Semantics renders per ablation variant. -->
                        <div class="column" style="background-color: #f5f5f5; margin-left: 5px;">
                            <div class="item item-steve render_wrapper_small full">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/ours/semantics.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noseg">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noseg/semantics.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noprob">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noprob/semantics.mp4" type="video/mp4">
                                </video>
                            </div>
                            <div class="item item-steve render_wrapper_small noblock">
                                <video autoplay controls muted loop>
                                    <source src="./static/ablation/noblock/semantics.mp4" type="video/mp4">
                                </video>
                            </div>
                        </div>
                    </div>

                </div>
            </div>
            <!--/ Effect of Components. -->


            <!-- A commented-out "Method Overview" block previously here contained leftover text from a
                 different project's template (mesh texture generation with StyleGAN2 discriminators),
                 which did not describe this paper; removed. Restore from version history if a method
                 overview figure is added. -->

            <!-- Concurrent Work. -->
            <div class="columns is-centered">
                <div class="column is-full-width">
                    <h2 class="title is-3">Related Links</h2>

                    <div class="content has-text-justified">
                        <p>
                            For more work on similar tasks, please check out
                        </p>
                        <p>
                            <a href="https://shuaifengzhi.com/Semantic-NeRF/">Semantic-NeRF</a> extends neural radiance fields (NeRF) to jointly encode semantics with appearance and geometry, given ground-truth (possibly sparse) semantic annotations in addition to RGB images.
                        </p>
                        <p>
                            <a href="https://abhijitkundu.info/projects/pnf/">Panoptic Neural Fields</a> propose an object-aware neural scene representation that decomposes a scene into a set of objects (things) and background (stuff), using machine generated object bounding boxes and machine generated semantic labels.
                        </p>

                        <p>
                            <a href="https://github.com/vLAR-group/DM-NeRF">DM-NeRF</a> tackles scene decomposition by optimizing an object identifier field for a scene given instance annotations for input frames.
                        </p>

                        <p>
                            <a href="https://fuxiao0719.github.io/projects/panopticnerf/">Panoptic NeRF</a> tackles a label transfer task for a scene given a coarse panoptically segmented mesh and machine generated 2D semantic segmentations.
                        </p>

                        <p>
                            <a href="https://nesf3d.github.io/">NeSF</a> produces 3D semantic fields from posed RGB images alone, generalizing over novel scenes.
                        </p>
                    </div>
                </div>
            </div>
            <!--/ Concurrent Work. -->

        </div>
    </section>

    <section class="section" id="BibTeX">
        <div class="container is-max-desktop content">
            <h2 class="title">BibTeX</h2>
            <pre>
<code>@InProceedings{Siddiqui_2023_CVPR,
    author    = {Siddiqui, Yawar and Porzi, Lorenzo and Bul\`o, Samuel Rota and M\"uller, Norman and Nie{\ss}ner, Matthias and Dai, Angela and Kontschieder, Peter},
    title     = {Panoptic Lifting for 3D Scene Understanding With Neural Fields},
    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
    month     = {June},
    year      = {2023},
    pages     = {9043-9052}
}</code></pre>
        </div>
    </section>


    <footer class="footer">
        <div class="container">
            <div class="content has-text-centered">
                <a class="icon-link" href="static/Texturify.pdf">
                    <i class="fas fa-file-pdf"></i>
                </a>
                <a class="icon-link external-link" href="https://github.com/nihalsid">
                    <i class="fab fa-github"></i>
                </a>
            </div>
            <div class="columns is-centered">
                <div class="column is-8">
                    <div class="content">
                        <p style="text-align:center">
                            Source code mainly borrowed from <a href="https://keunhong.com/">Keunhong Park</a>'s <a
                                href="https://nerfies.github.io/">Nerfies website</a>.
                        </p>
                        <p style="text-align:center">
                            Please contact <a href="https://niessnerlab.org/members/yawar_siddiqui/profile.html">Yawar Siddiqui</a> for feedback and questions.
                        </p>

                    </div>
                </div>
            </div>
        </div>
    </footer>

    <!-- Import maps polyfill -->
    <!-- Remove this when import maps will be widely supported -->
    <script async src="https://unpkg.com/es-module-shims@1.3.6/dist/es-module-shims.js"></script>

    <script type="importmap">
        {
            "imports": {
                "three": "./js/three.module.js"
            }
        }
    </script>

    <script type="module">

        import * as THREE from 'three';

        import { PLYLoader } from './js/PLYLoader.js';
        import { OrbitControls } from './js/OrbitControls.js'

        // Per-viewer scene registry, keyed by container div id. Each entry
        // holds the scene variants that render() can select; only "color" is
        // ever populated on this page.
        let div_to_scene = {
            "mesh_color": {
                "color": null,
            },
            "mesh_instance": {
                "color": null,
            },
            "mesh_semantics": {
                "color": null,
            }
        }

        // True while the primary mouse button is held; used to suppress
        // keyboard shortcuts during orbit drags.
        let mouse_button_down = false;
        // OrbitControls for every viewer, so 'r' can reset them all at once.
        let list_of_orbit_controls = []
        // Cameras are created lazily and shared across viewers of the same kind.
        let style_camera = null;
        let camera = null;
        // Toggled by 'g' (switch_geometry); selects which scene variant to render.
        let render_colors = true;
        let style_id = "0"
        // Parsed from ./models/pose.txt in window.onload:
        // poses[i] = [THREE.Vector3 position, rotX, rotY, rotZ].
        let poses = null;
        // Camera-frustum marker mesh per viewer, repositioned by the slider.
        let camera_mesh = {
            "mesh_color": null,
            "mesh_instance": null,
            "mesh_semantics": null
        }
        // Initial pose index shown by the comparison slider.
        let start_idx = 253;
        // Sprite-sheet geometry for the slider image strips: each frame is
        // img_height x col_width, laid out 25 frames per row; "ours" stacks
        // 4 sub-images per frame, "nearest" stacks 3.
        let img_height = 192;
        let row_height_ours = 192 * 4;
        let row_height_nearest = 192 * 3;
        let col_width = 256;
        // Auto-rotation state for the mesh viewers; rotation direction flips
        // every 500 frames and is disabled once the user touches the slider.
        let rotate_enable = true;
        let rotate_count = 0;


        /**
         * Create a perspective camera sized to the parent element of the
         * container `div_name`. The initial eye position is pushed further
         * out for the "chair" and "style" viewers so their meshes fit the
         * frame. Returns the configured THREE.PerspectiveCamera.
         */
        function setup_camera(div_name){
            let container = document.getElementById(div_name);
            let width = container.parentElement.clientWidth;
            let height = container.parentElement.clientHeight;
            // (Removed a leftover debug console.log of width/height here.)
            let camera = new THREE.PerspectiveCamera( 35, width / height, 0.1, 50 );
            let camera_init_position = new THREE.Vector3( 0.5614, -0.1732, 0.1237);
            if (div_name.includes("chair")){
                camera_init_position = camera_init_position.multiplyScalar(1.5)
            }
            else if (div_name.includes("style")) {
                camera_init_position = camera_init_position.multiplyScalar(1.25)
            }
            camera.position.set(camera_init_position.x, camera_init_position.y, camera_init_position.z);
            camera.up.set(0, 0, 1);  // Z-up
            camera.lookAt(0, 0, 0);
            return camera;
        }

        /**
         * Build one mesh viewer: lazily create the camera shared by all
         * viewers on first use, spin up the render surface for `div_name`
         * with the mesh at `mesh_path`, and register its orbit controls so
         * the global reset shortcut can reach them.
         */
        function setup_render_divs(div_name, mesh_path){
            if (camera === null) {
                camera = setup_camera(div_name)
            }
            list_of_orbit_controls.push(create_render_div(camera, div_name, mesh_path))
        }

        // NOTE(review): this function is never called on this page, and it
        // references names that are not defined anywhere in this module
        // (create_style_render_div, set_style_0/1/2) or in this document
        // (#style_button_0..2) — invoking it would throw. It appears to be a
        // leftover from the website template; confirm before removing.
        function setup_style_render_divs(div_name, mesh_path){
            if (style_camera == null) {
                style_camera = setup_camera(div_name)
            }
            let orbit_control = create_style_render_div(style_camera, div_name, mesh_path, true)
            list_of_orbit_controls.push(orbit_control)
            document.getElementById("style_button_0").addEventListener("click", set_style_0)
            document.getElementById("style_button_1").addEventListener("click", set_style_1)
            document.getElementById("style_button_2").addEventListener("click", set_style_2)
        }

        /**
         * Build a WebGL viewer inside the container `div_id`: load the mesh
         * at `mesh_path` plus the camera-frustum marker into a fresh scene,
         * attach orbit controls with slow auto-rotation, and start the render
         * loop. Returns the OrbitControls instance so callers can reset it.
         */
        function create_render_div(camera, div_id, mesh_path) {
            let container;
            let renderer, controls;

            init();
            animate();

            function init() {

                container = document.getElementById(div_id);
                let width = container.parentElement.clientWidth;
                let height = container.parentElement.clientHeight;

                div_to_scene[div_id]["color"] = new THREE.Scene();
                div_to_scene[div_id]["color"].background = new THREE.Color( 0xffffff );

                // Main mesh (vertex-colored PLY).

                const loader = new PLYLoader();
                loader.load( mesh_path, function ( geometry ) {
                    geometry.computeVertexNormals();
                    let material_color = new THREE.MeshBasicMaterial( { color: 0xffffff, vertexColors: THREE.VertexColors, side: THREE.DoubleSide} );
                    const mesh_color = new THREE.Mesh( geometry, material_color );
                    div_to_scene[div_id]["color"].add( mesh_color );
                }, (xhr) => {
                    console.log((xhr.loaded / xhr.total) * 100 + '% loaded')
                }, (error) => {
                    console.log(error)
                }
                );

                // Camera-frustum marker, posed from poses[start_idx]
                // ([position Vector3, rotX, rotY, rotZ]); the slider handler
                // in window.onload re-poses it on input.
                loader.load( "./models/camera.ply", function ( geometry ) {
                    geometry.computeVertexNormals();
                    let material_color = new THREE.MeshBasicMaterial( { color: 0xffffff, vertexColors: THREE.VertexColors, side: THREE.DoubleSide} );
                    camera_mesh[div_id] = new THREE.Mesh( geometry, material_color );
                    camera_mesh[div_id].setRotationFromEuler(new THREE.Euler(0, 0, 0, 'XYZ'))
                    camera_mesh[div_id].rotateZ(poses[start_idx][3]);
                    camera_mesh[div_id].rotateY(poses[start_idx][2]);
                    camera_mesh[div_id].rotateX(poses[start_idx][1]);
                    camera_mesh[div_id].position.x = poses[start_idx][0].x;
                    camera_mesh[div_id].position.y = poses[start_idx][0].y;
                    camera_mesh[div_id].position.z = poses[start_idx][0].z;
                    div_to_scene[div_id]["color"].add( camera_mesh[div_id] );
                }, (xhr) => {
                    console.log('camera ' + (xhr.loaded / xhr.total) * 100 + '% loaded')
                }, (error) => {
                    console.log(error)
                });

                // Lights.

                div_to_scene[div_id]["color"].add( new THREE.HemisphereLight( 0x333333, 0x222222 ) );
                addShadowedLight(div_to_scene[div_id]["color"], 1, 1, 1, 0xffffff, 1.35 );
                addShadowedLight(div_to_scene[div_id]["color"],  0.5, 1, - 1, 0xffffff, 1 );

                // Renderer.

                renderer = new THREE.WebGLRenderer( { antialias: true } );
                renderer.setPixelRatio( window.devicePixelRatio );
                renderer.setSize( width, height);
                renderer.outputEncoding = THREE.sRGBEncoding;

                renderer.shadowMap.enabled = true;

                container.appendChild( renderer.domElement );

                controls = new OrbitControls(camera, renderer.domElement)
                controls.enableDamping = false
                controls.autoRotate = true
                controls.autoRotateSpeed = 0.35

                window.addEventListener( 'resize', onWindowResize );
            }

            function onWindowResize() {
                let width = container.clientWidth;
                let height = container.clientHeight;
                camera.aspect = width / height;
                // BUGFIX: the new aspect ratio only takes effect after
                // updateProjectionMatrix(). The previous handler skipped it
                // and instead incremented camera.rotation.z by 0.005 on every
                // resize event, skewing the view without applying the aspect.
                camera.updateProjectionMatrix();
                renderer.setSize( width, height );
            }

            function animate() {
                requestAnimationFrame( animate );
                render();
                rotate_count += 1
                // Reverse the auto-rotation direction every 500 frames so the
                // viewer sweeps back and forth instead of spinning endlessly.
                if (rotate_count == 500 && rotate_enable) {
                    rotate_count = 0;
                    controls.autoRotate = false
                    controls.update();
                    controls.autoRotate = true
                    controls.autoRotateSpeed *= -1
                }
                if (!rotate_enable){
                    rotate_count = 0;
                    controls.autoRotate = false
                    controls.update();
                }
            }

            function render() {
                // BUGFIX: only the "color" scene is ever populated on this
                // page, so fall back to it when the requested variant is
                // missing; previously toggling render_colors (the 'g' key)
                // passed an undefined scene to renderer.render().
                let scene = div_to_scene[div_id][render_colors ? "color" : "geo"] || div_to_scene[div_id]["color"];
                renderer.render( scene, camera );
                controls.update();
            }

            return controls;
        }

        /**
         * Add a shadow-casting directional light at (x, y, z) with the given
         * color and intensity to `scene`. The shadow camera covers a 2x2 box
         * around the origin with a 1024x1024 shadow map.
         */
        function addShadowedLight(scene, x, y, z, color, intensity ) {
            const light = new THREE.DirectionalLight( color, intensity );
            light.position.set( x, y, z );
            light.castShadow = true;

            const extent = 1;
            light.shadow.camera.left = -extent;
            light.shadow.camera.right = extent;
            light.shadow.camera.top = extent;
            light.shadow.camera.bottom = -extent;
            light.shadow.camera.near = 1;
            light.shadow.camera.far = 4;

            light.shadow.mapSize.width = 1024;
            light.shadow.mapSize.height = 1024;

            // Small negative bias to reduce shadow acne.
            light.shadow.bias = -0.001;

            scene.add( light );
        }

        // Keyboard shortcuts (suppressed while a mouse button is held so
        // orbit-control drags do not trigger them):
        //   G — toggle color/geometry rendering (switch_geometry)
        //   R — reset all orbit controls (reset_orbit_controls)
        document.addEventListener('keydown', logKey);

        function logKey(evt) {
            // KeyboardEvent.code identifies the physical key and replaces the
            // deprecated numeric keyCode (71 = G, 82 = R).
            if (evt.code === 'KeyG' && !mouse_button_down) {
                switch_geometry()
            }
            if (evt.code === 'KeyR' && !mouse_button_down) {
                reset_orbit_controls()
            }
        }

        /**
         * Log the current camera position (debug aid for choosing view
         * presets), then flip the flag that selects which scene variant
         * each viewer's render loop draws.
         */
        function switch_geometry() {
            console.log(camera.position)
            render_colors = render_colors ? false : true
        }

        /**
         * Snap every registered viewer's orbit controls back to their
         * initial saved state (bound to the 'R' key).
         */
        function reset_orbit_controls() {
            for (const controls of list_of_orbit_controls) {
                controls.reset()
            }
        }

        // Track whether the primary mouse button is held, so the keyboard
        // shortcuts above stay inert while the user drags a viewer.
        document.body.onmousedown = (evt) => {
            if (evt.button === 0) {
                mouse_button_down = true
            }
        }
        document.body.onmouseup = (evt) => {
            if (evt.button === 0) {
                mouse_button_down = false
            }
        }

        // Page bootstrap: parse the pose file, wire up the comparison slider
        // and ablation buttons, position the sprite-sheet strips at the
        // initial frame, and start the three mesh viewers.
        window.onload = function() {
            // Keep the slider out of the tab order so arrow keys/shortcuts
            // are not captured by it.
            let slider = document.getElementsByClassName("slider")[0]
            slider.removeAttribute("tabIndex")
            poses = []
            // pose.txt: one comma-separated line per frame:
            // x, y, z, rotX, rotY, rotZ — stored as [Vector3, rotX, rotY, rotZ].
            fetch('./models/pose.txt').then(response => response.text()).then(text => {
                text.split(/\r?\n/).forEach(line => {
                    let items = []
                    if (line.length > 0) {
                        line.split(/,/).forEach(item => {
                            items.push(parseFloat(item))
                        })
                        poses.push([new THREE.Vector3(items[0], items[1], items[2]), items[3], items[4], items[5]])
                    }
                });
            });
            
            // Slider input: stop auto-rotation, re-pose the frustum marker in
            // all three viewers, and scroll the sprite sheets to the frame.
            // NOTE(review): assumes the pose fetch above has resolved before
            // the user first moves the slider.
            document.getElementById('interpolation-slider').addEventListener('input', function(event){
                rotate_enable = false;
                ["mesh_color", "mesh_semantics", "mesh_instance"].forEach(div_id => {
                    let value = parseInt(event.target.value);
                    camera_mesh[div_id].setRotationFromEuler(new THREE.Euler(0, 0, 0, 'XYZ'))
                    camera_mesh[div_id].rotateZ(poses[value][3]);
                    camera_mesh[div_id].rotateY(poses[value][2]);
                    camera_mesh[div_id].rotateX(poses[value][1]);
                    camera_mesh[div_id].position.x = poses[value][0].x;
                    camera_mesh[div_id].position.y = poses[value][0].y;
                    camera_mesh[div_id].position.z = poses[value][0].z;
                    // Sprite sheets are laid out 25 frames per row.
                    let _row_idx = Math.floor(value / 25);
                    let _col_idx = value % 25;
                    slide_ours(_row_idx, _col_idx)
                    slide_nearest(_row_idx, _col_idx)
                })
            });
            
            // Scroll the "ours" strips (color/depth/semantics/instance stacked
            // vertically per frame) to the given sprite-sheet cell.
            function slide_ours(row_idx, col_idx) {
                document.getElementById('ours_color').style.backgroundPosition = "top "+ (-row_idx * row_height_ours - img_height * 0) +"px left "+ (-col_idx * col_width) +"px" 
                document.getElementById('ours_depth').style.backgroundPosition = "top "+ (-row_idx * row_height_ours - img_height * 1) +"px left "+ (-col_idx * col_width) +"px" 
                document.getElementById('ours_semantics').style.backgroundPosition = "top "+ (-row_idx * row_height_ours - img_height * 2) +"px left "+ (-col_idx * col_width) +"px" 
                document.getElementById('ours_instance').style.backgroundPosition = "top "+ (-row_idx * row_height_ours - img_height * 3) +"px left "+ (-col_idx * col_width) +"px" 
            }

            // Same for the "nearest" baseline strips (three stacked images).
            function slide_nearest(row_idx, col_idx) {
                document.getElementById('nearest_color').style.backgroundPosition = "top "+ (-row_idx * row_height_nearest - img_height * 0) +"px left "+ (-col_idx * col_width) +"px" 
                document.getElementById('nearest_semantics').style.backgroundPosition = "top "+ (-row_idx * row_height_nearest - img_height * 1) +"px left "+ (-col_idx * col_width) +"px" 
                document.getElementById('nearest_instance').style.backgroundPosition = "top "+ (-row_idx * row_height_nearest - img_height * 2) +"px left "+ (-col_idx * col_width) +"px" 
            }

            // Show only the ablation variant selected by `state` (index into
            // `states`); hide the others by collapsing their max-height.
            function set_ablation_state(state) {
                let states = ["full", "noseg", "noprob", "noblock"];
                for (let i = 0; i < states.length; i++){
                    let all_items = document.getElementsByClassName(states[i])
                    if (i == state){
                        [].forEach.call(all_items, item => {
                            // item.style.display = 'box'
                            item.style.visibility = 'visible'
                            item.style.maxHeight = '100%'
                        })
                    }
                    else {
                        [].forEach.call(all_items, item => {
                            // item.style.display = 'none'
                            item.style.visibility = 'hidden'
                            item.style.maxHeight = 0
                        })
                    }
                }
            }

            // Default to the full model; one button per ablation variant.
            set_ablation_state(0)
            // document.getElementById("button_0").focus();
            document.getElementById('button_0').addEventListener('click', function(event){
                set_ablation_state(0)
            })
            document.getElementById('button_1').addEventListener('click', function(event){
                set_ablation_state(1)
            })
            document.getElementById('button_2').addEventListener('click', function(event){
                set_ablation_state(2)
            })
            document.getElementById('button_3').addEventListener('click', function(event){
                set_ablation_state(3)
            })


            // Position the sprite strips at the initial frame.
            let _row_idx = Math.floor(start_idx / 25);
            let _col_idx = start_idx % 25;
            
            slide_ours(_row_idx, _col_idx)
            slide_nearest(_row_idx, _col_idx)



            // Launch the three mesh viewers.
            setup_render_divs("mesh_color", './models/colors.ply')
            setup_render_divs("mesh_semantics", './models/semantics.ply')
            setup_render_divs("mesh_instance", './models/instance.ply')

        };

    </script>
</body>

</html>
