<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <!-- Meta tags for social media banners; these serve as the page's "business card" -->
  <meta name="description" content="CrossViewDiff: a cross-view diffusion model for synthesizing realistic street-view panoramas from satellite imagery.">
  <meta property="og:title" content="CrossViewDiff: A Cross-View Diffusion Model for Satellite-to-Street View Synthesis"/>
  <meta property="og:description" content="CrossViewDiff: a cross-view diffusion model for synthesizing realistic street-view panoramas from satellite imagery."/>
  <meta property="og:url" content="https://opendatalab.github.io/CrossViewDiff/"/>
  <!-- Path to banner image, which should be placed at the path listed below. Optimal dimensions are 1200x630 -->
  <meta property="og:image" content="static/images/your_banner_image.png" />
  <meta property="og:image:width" content="1200"/>
  <meta property="og:image:height" content="630"/>


  <meta name="twitter:title" content="TWITTER BANNER TITLE META TAG">
  <meta name="twitter:description" content="TWITTER BANNER DESCRIPTION META TAG">
  <!-- Path to banner image, should be in the path listed below. Optimal dimenssions are 1200X600-->
  <meta name="twitter:image" content="static/images/your_twitter_banner_image.png">
  <meta name="twitter:card" content="summary_large_image">
  <!-- Keywords for your paper to be indexed by -->
  <meta name="keywords" content="cross-view synthesis, diffusion model, satellite-to-street view, street-view panorama generation">
  <meta name="viewport" content="width=device-width, initial-scale=1">


  <title>CrossViewDiff</title>
  <link rel="icon" type="image/x-icon" href="">
  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
  rel="stylesheet">

  <link rel="stylesheet" href="static/css/bulma.min.css">
  <link rel="stylesheet" href="static/css/bulma-carousel.min.css">
  <link rel="stylesheet" href="static/css/bulma-slider.min.css">
  <link rel="stylesheet" href="static/css/fontawesome.all.min.css">
  <link rel="stylesheet"
  href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
  <link rel="stylesheet" href="static/css/index.css">

  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
  <script src="https://documentcloud.adobe.com/view-sdk/main.js"></script>
  <script defer src="static/js/fontawesome.all.min.js"></script>
  <script src="static/js/bulma-carousel.min.js"></script>
  <script src="static/js/bulma-slider.min.js"></script>
  <script src="static/js/index.js"></script>
</head>
<body>


  <section class="hero">
    <div class="hero-body">
      <div class="container is-max-desktop">
        <div class="columns is-centered">
          <div class="column has-text-centered">
            <h1 class="title is-2 publication-title"><b>CrossViewDiff:</b>
              A Cross-View Diffusion Model for Satellite-to-Street View Synthesis</h1>
            <div class="is-size-5 publication-authors">
              <!-- Paper authors -->
              <span class="author-block">
                <a href="https://liweijia.github.io/" target="_blank">Weijia Li</a><sup>1 *</sup>,</span>
              <span class="author-block">
              <a href="" target="_blank">Jun He</a><sup>1 *</sup>,</span>
              <span class="author-block">
                <a href="https://yejy53.github.io/" target="_blank">Junyan Ye</a><sup>1,2 *</sup>,</span>
              </span>
              <span class="author-block">
                <a href="" target="_blank">Huaping Zhong</a><sup>3 *</sup>,</span>
              </span>
              </div>
              <div class="is-size-5 publication-authors">
                <span class="author-block">
                  <a href="" target="_blank">Zhimeng Zheng</a><sup>2</sup>,</span>
                </span>
                <span class="author-block">
                  <a href="" target="_blank">Zilong Huang</a><sup>1</sup>,</span>
                </span>
                <span class="author-block">
                  <a href="https://dahua.site/" target="_blank">Dahua Lin</a><sup>2</sup>,</span>
                </span>
                <span class="author-block">
                  <a href="https://conghui.github.io/" target="_blank">Conghui He</a><sup>2,3 <math xmlns="http://www.w3.org/1998/Math/MathML"><mo>†</mo></math></sup></span>
                </span>
              </span>
            </div>

            <div class="is-size-5 publication-authors">
              <span class="author-block"><sup>1</sup>Sun Yat-Sen University </span>
              <span class="author-block"><sup>2</sup>Shanghai AI Laboratory </span>
              <span class="author-block"><sup>3</sup>SenseTime</span>
            </div>

              <h4><sup>*</sup>Equal Contribution&nbsp;&nbsp;<sup>&dagger;</sup>Corresponding Author</h4>

                  <div class="publication-links">
                    <!-- Supplementary PDF link -->
                    <span class="link-block">
                      <a href="static/2408.14765v1.pdf" target="_blank"
                      class="external-link button is-normal is-rounded is-dark">
                      <span class="icon">
                        <i class="fas fa-file-pdf"></i>
                      </span>
                      <span>Paper</span>
                    </a>
                  </span>

                  <!-- Github link -->
                  <span class="link-block">
                    <a href="https://opendatalab.github.io/CrossViewDiff/" target="_blank"
                    class="external-link button is-normal is-rounded is-dark">
                    <span class="icon">
                      <i class="fab fa-github"></i>
                    </span>
                    <span>Code</span>
                  </a>
                </span>

                <!-- ArXiv abstract Link -->
                <span class="link-block">
                  <a href="https://arxiv.org/abs/2408.14765v1" target="_blank"
                  class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="ai ai-arxiv"></i>
                  </span>
                  <span>arXiv</span>
                </a>
              </span>
                </div>
          </div>
        </div>
      </div>
    </div>
</section>


<!-- Paper abstract -->
<section class="section hero is-light">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3">Abstract</h2>
        <div class="content has-text-justified">
          <p>
            Satellite-to-street view synthesis aims at generating a realistic street-view image from its corresponding satellite-view image. Although stable diffusion models have exhibited remarkable performance in a variety of image generation applications, their reliance on similar-view inputs to control the generated structure or texture restricts their application to the challenging cross-view synthesis task. 
In this work, we propose CrossViewDiff, a cross-view diffusion model for satellite-to-street view synthesis. To address the challenges posed by the large discrepancy across views, we design the satellite scene structure estimation and cross-view texture mapping modules to construct the structural and textural controls for street-view image synthesis.
We further design a cross-view control guided denoising process that incorporates the above controls via an enhanced cross-view attention module.
To achieve a more comprehensive evaluation of the synthesis results, we additionally design a GPT-based scoring method as a supplement to standard evaluation metrics.
We also explore the effect of different data sources (e.g., text, maps, building heights, and multi-temporal satellite imagery) on this task.
Results on three public cross-view datasets show that CrossViewDiff outperforms the current state-of-the-art methods on both standard and GPT-based evaluation metrics, generating high-quality street-view panoramas with more realistic structures and textures across rural, suburban, and urban scenes.
          </p>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- End paper abstract -->


<!-- Method -->
<section class="section">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-five-fifths">
        <h2 class="title is-3">Method</h2>
        <div class="content has-text-justified">
          
        <div class="column is-centered has-text-centered">
          <img src="./static/images/pipeline.jpg"
                class="interpolation-image"
                alt="Interpolate start reference image."
                width="150%"/>
                <figcaption style="font-size: 14px;text-align: center;">Framework of CrossViewDiff</figcaption>
        </div>


        <p style="font-size: 20px;">
          Overview of <b>CrossViewDiff</b>. Initially, we utilize depth estimation methods to create 3D
          voxels as intermediaries for information across different viewpoints. Subsequently, we
          establish structural and textural control constraints based on the satellite and 3D
          voxels. Finally, using an enhanced Cross-Attention mechanism, we integrate CrossView Controls information, guiding the Denoising Process to produce the synthesized
          street-view images.
        </p>


        </div>
      </div>
    </div>

  </div>
</section>

<!-- End method -->

<!-- Results -->
<section class="section hero is-light">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-five-fifths">
        <h2 class="title is-3">Results</h2>
        <div class="content has-text-justified">
        
        <h3>Qualitative Comparison</h3>
        <p style="font-size: 20px;">

        </p>
        <div class="column is-centered has-text-centered">
          <img src="./static/images/Comparison.png"
                class="interpolation-image"
                alt="Interpolate start reference image."
                width="150%"/>
                <figcaption style="font-size: 14px;text-align: center;">Qualitative comparison of synthesis results on CVUSA, CVACT and OmniCity</figcaption>
        </div>

        <h3>Qualitative Ablation</h3>
        <div class="column is-centered has-text-centered">
          <img src="./static/images/Ablation experiment.png"
                class="interpolation-image"
                alt="Interpolate start reference image."
                width="150%"/>
                <figcaption style="font-size: 14px;text-align: center;">Qualitative ablation results on CVUSA and OmniCity</figcaption>
        </div>

        </div>
      </div>
    </div>

  </div>
</section>


<!-- End results -->

<!-- Evaluation -->
<section class="section">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-five-fifths">
        <h2 class="title is-3">Evaluation</h2>
        <div class="content has-text-justified">
          <!-- index evaluation-->
          <h3>Quantitative Evaluation</h3>
          <p style="font-size: 20px;">
            We present a quantitative comparison of different methods on the CVUSA, CVACT and OmniCity datasets, 
            evaluating them in terms of various metrics. Compared to the state-of-the-art method for cross-view synthesis (Sat2Density), 
            our method achieved significant improvements in SSIM and FID scores by <b>9.44%</b> and <b>42.70%</b>
            on CVUSA, respectively. Similarly, enhancements of <b>6.46%</b> and <b>10.94%</b> in SSIM and
            FIDwere observed on CVACT. Our method achieved significant improvements in
            SSIM and FID by <b>11.71%</b> and <b>52.21%</b> on OmniCity, respectively.
          </p>

         <div class="column is-centered has-text-centered">
           <img src="./static/images/Compare_table.png"
                 class="interpolation-image"
                 alt="Interpolate start reference image."/>
                 <figcaption style="font-size: 14px;text-align: center;">Quantitative comparison of different methods on CVUSA,CVACT and OmniCity
                  datasets in terms of various evaluation metrics</figcaption>
         </div>
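
          <p style="font-size: 20px;">
            For reference, SSIM and FID scores of the kind reported above can be computed
            with standard libraries. The snippet below is a hedged sketch using
            scikit-image and torchmetrics, not the paper's exact evaluation code.
          </p>
          <pre><code># Sketch: computing SSIM and FID over paired real/synthesized panoramas.
# Assumes uint8 numpy arrays of shape (N, H, W, 3); not the paper's own script.
import numpy as np
import torch
from skimage.metrics import structural_similarity as ssim
from torchmetrics.image.fid import FrechetInceptionDistance

def evaluate(real_imgs, fake_imgs):
    # SSIM: mean structural similarity over paired images.
    ssim_scores = [ssim(r, f, channel_axis=-1, data_range=255)
                   for r, f in zip(real_imgs, fake_imgs)]
    # FID: Frechet distance between Inception features of the two image sets.
    fid = FrechetInceptionDistance(feature=2048)
    fid.update(torch.from_numpy(real_imgs).permute(0, 3, 1, 2), real=True)
    fid.update(torch.from_numpy(fake_imgs).permute(0, 3, 1, 2), real=False)
    return float(np.mean(ssim_scores)), fid.compute().item()
</code></pre>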
          <!-- End index evaluation-->

          <!-- GPT evalution-->
          <h3>GPT-based Evaluation</h3>
           <p style="font-size: 20px;">
              Beyond conventional similarity and realism metrics, we also leverage the powerful 
              visual-linguistic capabilities of existing MLLM large models to design CrossScore for evaluating synthetic images.
           </p>

          <div class="column is-centered has-text-centered">
            <img src="./static/images/Figure_GPT_Score.jpg"
                  class="interpolation-image"
                  alt="Interpolate start reference image."/>
                  <figcaption style="font-size: 14px;text-align: center;">The overall process for automated evaluation using GPT-4o</figcaption>
          </div>
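
          <p style="font-size: 20px;">
            As a rough illustration of this automated evaluation, the sketch below queries
            GPT-4o through the OpenAI API with a real and a synthesized panorama. The prompt
            wording is a hypothetical stand-in; the paper defines the actual CrossScore protocol.
          </p>
          <pre><code># Sketch of a CrossScore-style query to GPT-4o (prompt text is illustrative).
import base64
from openai import OpenAI

client = OpenAI()

def encode(path):
    # Base64-encode an image file for inline transmission.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode()

def cross_score(real_path, synth_path):
    prompt = ("The first image is a real street-view panorama and the second is a "
              "synthesized one. Rate the synthesized image on Consistency, Visual "
              "Realism, and Perceptual Quality, each from 1 to 10, as JSON.")
    resp = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": [
            {"type": "text", "text": prompt},
            {"type": "image_url",
             "image_url": {"url": "data:image/png;base64," + encode(real_path)}},
            {"type": "image_url",
             "image_url": {"url": "data:image/png;base64," + encode(synth_path)}},
        ]}],
    )
    return resp.choices[0].message.content
</code></pre>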
          <p style="font-size: 20px;">
            Our method significantly outperforms
            other GAN-based and diffusion-based generation methods in the three evaluation
            dimensions of Consistency, Visual Realism, and Perceptual Quality. This also indicates that the street-view images synthesized by our method are more aligned with
            human user needs, which aids in subsequent applications such as virtual scene tasks.
          </p>
          <div class="column is-centered has-text-centered">
            <img src="./static/images/GPT_score.jpg"
                  class="interpolation-image"
                  alt="Interpolate start reference image."
                  width="50%"/>
                  <figcaption style="font-size: 14px;text-align: center;"> GPT-based evaluation metrics for
                    Cross-View Synthesis</figcaption>
          </div>
          <!--End GPT evaluation-->
        </div>
      </div>
    </div>

  </div>
</section>

<!-- End evaluation -->



<!--BibTex citation -->
  <section class="section" id="BibTeX">
    <div class="container is-max-desktop content">
      <h2 class="title">BibTeX</h2>
      <pre><code>@article{li2024crossviewdiff,
  title   = {CrossViewDiff: A Cross-View Diffusion Model for Satellite-to-Street View Synthesis},
  author  = {Li, Weijia and He, Jun and Ye, Junyan and Zhong, Huaping and Zheng, Zhimeng and Huang, Zilong and Lin, Dahua and He, Conghui},
  journal = {arXiv preprint arXiv:2408.14765},
  year    = {2024}
}</code></pre>
    </div>
</section>
<!--End BibTex citation -->


  <footer class="footer">
  <div class="container">
    <div class="columns is-centered">
      <div class="column is-8">
        <div class="content">

          <p>
            This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank">Academic Project Page Template</a>, which was adapted from the <a href="https://nerfies.github.io" target="_blank">Nerfies</a> project page.
            You are free to borrow the source code of this website; we just ask that you link back to this page in the footer. <br> This website is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/" target="_blank">Creative
            Commons Attribution-ShareAlike 4.0 International License</a>.
          </p>

        </div>
      </div>
    </div>
  </div>
</footer>


  </body>
  </html>
