<!DOCTYPE html>

<html lang="en">
    <head>

        <!-- Metadata -->
        <meta charset="utf-8"/>
        <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"/>
        <meta name="description" content="Personal academic homepage of Zhang Jia-Qi (张加其), Ph.D. candidate in Computer Science and Technology at Beihang University (BUAA)."/>
        <meta name="author" content="Zhang Jia-Qi"/>
        <title>Zhang Jia-Qi | PhD in CS&amp;T'25 @ BUAA</title>
        <link rel="icon" type="image/png" href="assets/img/mandu_icon.png"/>
        
        <!-- Font Awesome icons -->
        <script src="https://use.fontawesome.com/releases/v5.15.3/js/all.js"></script>
		
        <!-- Google fonts-->
        <!-- <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
        <link href="https://fonts.googleapis.com/css?family=Saira+Extra+Condensed:500,700" rel="stylesheet" type="text/css"/>
        <link href="https://fonts.googleapis.com/css?family=Muli:400,400i,800,800i" rel="stylesheet" type="text/css"/>
        <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/font-awesome.min.css">
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/OwlCarousel2/2.3.4/assets/owl.carousel.min.css">
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/OwlCarousel2/2.3.4/assets/owl.theme.default.min.css"> -->
        
        <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@4.3.1/dist/css/bootstrap.min.css">
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
        <link href="https://fonts.loli.net/css?family=Saira+Extra+Condensed:500,700&display=swap" rel="stylesheet" type="text/css"/>
        <link href="https://fonts.loli.net/css?family=Muli:400,400i,800,800i&display=swap" rel="stylesheet" type="text/css"/>
        <!-- NOTE(review): removed duplicate Font Awesome 4.4.0 stylesheet — v4.7.0 is already loaded above, and loading the older version afterwards made it override the newer rules. -->
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/OwlCarousel2/2.3.4/assets/owl.carousel.min.css">
        <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/OwlCarousel2/2.3.4/assets/owl.theme.default.min.css">

        <!-- Core theme CSS -->
        <link href="styles/styles.css" rel="stylesheet"/>        

    </head>

    <body class="light-theme">
        <nav class="topnav navbar navbar-expand-lg navbar-light bg-white fixed-top">
            <div class="container">
                <a class="navbar-brand" href="./index.html"><strong>Zhang Jia-Qi @ BUAA</strong></a>
                <button  
                    id="top-navbar-button"
                    class="navbar-toggler"  
                    type="button"  
                    data-toggle="collapse"  
                    data-target="#navbarLinks"  
                    aria-controls="navbarLinks"  
                    aria-expanded="false"  
                    aria-label="Toggle navigation">  
                    <span class="navbar-toggler-icon"></span>  
                </button>  
                <div class="collapse navbar-collapse justify-content-end" id="navbarLinks">
                    <ul class="navbar-nav ml-auto d-flex align-items-center">
                        <li class="nav-item">
                            <a class="nav-link" href="#updates">News</a>
                        </li>
                        <li class="nav-item">
                            <a class="nav-link" href="#research">Publications</a>
                        </li>
                        <li class="nav-item">
                            <a class="nav-link" href="#outreach">Projects</a>
                        </li>
                        <li class="nav-item">
                            <a class="nav-link" href="#gallery">Gallery</a>
                        </li>
                    </ul>
                </div>                
            </div>
        </nav>

        <!-- Moving particles -->
        <canvas id="canvas"></canvas>

        <!-- Progress bar on top -->
        <div class="progress-bar-container">
            <div class="progress-bar" id="progressBar"></div>
        </div>

        <!-- Back to top button -->
        <a id="back-to-top-button"></a>

        <!-- Toggle dark/light theme button -->
        <!-- <button class="toggle-theme-button" onclick="toggleTheme()">☀️</button> -->
        
        <!-- Assistant icon saying about theme changes -->
        <!-- <div class="popup-icon-container" id="popupIconContainer" draggable="true">
            <div class="icon"><img src="assets/img/mandu_icon.png" width="65" height="65"></div>
            <div class="speech-balloon"></div>
        </div> -->
        
        <!-- Dismissal area for assistant icon -->
        <!-- <div class="dismissal-area" id="dismissalArea">&#10006;</div> -->

        <!-- Content -->
        <div class="container custom-margin">            
            <!-- About section -->

<div class="row mb-4">
    <div class="col-lg-2 col-md-4">
        <div class="ring-container">
            <div class="ring">
                <div class="hollow-ring">
                    <img class="profile-image" src="assets/img/new_pic.jpg" alt="Zhang Jia-Qi (张加其)" />
                     
                        <div class="emoji-indicator">
                            🌏 
                            <span class="hover-text"> China </span> 
                        </div> 
                    
                </div>
            </div>
        </div>
        <hr />
        <div class="social-icons">	
             <a class="social-icon" href="https://scholar.google.com/citations?hl=zh-CN&amp;user=a-CCwW8AAAAJ" target="_blank" rel="noopener" title="Google Scholar"><i class="fa fa-graduation-cap" style="font-size: 35px; color: #4285f4"></i></a> 
             <a class="social-icon" href="https://orcid.org/0000-0002-8482-3666" target="_blank" rel="noopener" title="ORCID"><i class="fab fa-orcid" style="font-size: 35px; color: #9ece7a"></i></a> 
             <a class="social-icon" href="https://www.researchgate.net/profile/Zhang-Jia-Qi-2" target="_blank" rel="noopener" title="ResearchGate"><i class="fab fa-researchgate" style="font-size: 35px; color: #00D0BB"></i></a> 
        </div>
        <div class="social-icons">
             <a class="social-icon" href="https://github.com/Jiaqi-zhang" target="_blank" rel="noopener" title="GitHub"><i class="fab fa-github" style="font-size: 35px; color: #171515"></i></a> 
            <!--  -->
            <!-- NOTE(review): this "Resume" link points to the ResearchGate profile (same URL as the ResearchGate icon above) — update href to the actual CV/resume file. -->
             <a class="social-icon" href="https://www.researchgate.net/profile/Zhang-Jia-Qi-2" target="_blank" rel="noopener" title="Resume"><i class="fas fa-file-alt" style="font-size: 35px; color: #bd5d38"></i></a> 
        </div>
        <p></p>
    </div>

    <div class="col-lg-10 col-md-8">
        <h2>Zhang Jia-Qi (张加其) </h2>
        <p></p>
        <!-- NOTE(review): <hightlight> is a misspelled, non-standard element used throughout this page; styles.css (not reviewed here) likely selects on the "hightlight" tag name, so rename it together with the stylesheet (e.g. to <span class="highlight">) rather than in HTML alone. -->
        I am currently a Ph.D. candidate at the State Key Laboratory of Virtual Reality Technology and Systems at Beihang University (BUAA), under the supervision of <hightlight>Professor Shimin Hu (胡事民)</hightlight> and <a href="http://miaowang.me/" target="_blank" rel="noopener">Professor Miao Wang (汪淼)</a>. You can reach me via email at: <b>zhangjiaqi79@buaa.edu.cn</b>📧
        <p></p>
        I obtained both my Bachelor's and Master's degrees in Software Engineering from North China Electric Power University, where I was mentored by <a href="https://cce.ncepu.edu.cn/szdw/jsml/rjgcjys/2912c61d37bb491685892ee0c6b2e717.htm" target="_blank" rel="noopener">Professor Su-Qin Wang (王素琴)</a> and <a href="https://cce.ncepu.edu.cn/szdw/jsml/rjgcjys/11887a993a444b23b4af863190aed157.htm" target="_blank" rel="noopener">Professor Min Shi (石敏)</a>. During my Master's program, I also conducted academic research under the guidance of <a href="http://geometrylearning.com/lin/" target="_blank" rel="noopener">Professor Lin Gao (高林)</a> at the Institute of Computing Technology, Chinese Academy of Sciences.      
        <p></p>
              
        <p></p>
        <code>&gt;&gt; My research interests include the generation of <b>3D character animations</b>, with a primary focus on the automatic generation of 3D character movements from text or speech, as well as the generation of interactions between characters and objects within virtual environments.</code>
        <!-- <p></p> -->
    </div>
</div>

<!-- Updates section -->

<hr />

<div class="row custom-offset" id="updates">
    <div class="col">
        <h2 class="mb-5">🔥 News</h2>
        <p></p>
        <!-- <div class="owl-carousel owl-theme"> -->
            
                <!-- <div class="news-card">
    <img src="" class="w-full rounded-lg">
    <div class="news-desc">Our work <hightlight>HyT2M</hightlight> about text to motion is accepted by <hightlight>FCS (CCF B)</hightlight>.</div>
    <div class="news-time">2025-08</div>
</div> -->

<div class="row mb-2">
    <div class="col-sm-12">
        📰 2025-08 Our work <hightlight>HyT2M</hightlight> about text to motion is accepted by <hightlight>FCS (CCF B)</hightlight>.
    </div>
</div>

            
                <!-- <div class="news-card">
    <img src="" class="w-full rounded-lg">
    <div class="news-desc">Our work <hightlight>SHGS</hightlight> about garment simulation is accepted by <hightlight>PG 2025 (CCF B)</hightlight>.</div>
    <div class="news-time">2025-08</div>
</div> -->

<div class="row mb-2">
    <div class="col-sm-12">
        📰 2025-08 Our work <hightlight>SHGS</hightlight> about garment simulation is accepted by <hightlight>PG 2025 (CCF B)</hightlight>.
    </div>
</div>

            
                <!-- <div class="news-card">
    <img src="assets/img/updates_gan/tvcg_2024.png" class="w-full rounded-lg">
    <div class="news-desc">Our work <hightlight>SMRNet</hightlight> about motion retargeting is accepted by <hightlight>TVCG (CCF A)</hightlight>.</div>
    <div class="news-time">2024-07</div>
</div> -->

<div class="row mb-2">
    <div class="col-sm-12">
        📰 2024-07 Our work <hightlight>SMRNet</hightlight> about motion retargeting is accepted by <hightlight>TVCG (CCF A)</hightlight>.
    </div>
</div>

            
                <!-- <div class="news-card">
    <img src="assets/img/updates_gan/cg_2023.png" class="w-full rounded-lg">
    <div class="news-desc">Our work <hightlight>HoughLaneNet</hightlight> is accepted by <hightlight>Computers & Graphics 2023 (CCF C)</hightlight> and received the <hightlight>Best Paper Award</hightlight>.</div>
    <div class="news-time">2023-10</div>
</div> -->

<div class="row mb-2">
    <div class="col-sm-12">
        📰 2023-10 Our work <hightlight>HoughLaneNet</hightlight> is accepted by <hightlight>Computers &amp; Graphics 2023 (CCF C)</hightlight> and received the <hightlight>Best Paper Award</hightlight>.
    </div>
</div>

            
        <!-- </div> -->
        <p></p>
    </div>
</div>

<!-- Research section -->

<hr />

<div class="row custom-offset" id="research">
    <div class="col">
        <h2 class="mb-5">📝 Publications</h2>
        <p></p>
        <div id="filters">
            <button class="filter-button active" data-filter="*">all</button>
            
                <button class="filter-button" data-filter="animation">animation</button>
            
                <button class="filter-button" data-filter="colorization">colorization</button>
            
                <button class="filter-button" data-filter="lane detection">lane detection</button>
            
        </div>
        <p></p>
        <div id="projects" class="isotope">
            
                <div class="project" data-filter="animation">
    <div class="row mb-4">
        <div class="col-sm-4">
            <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/hyt2m.png" />
        </div>
        <div class="col-sm-8">
            
            <b>Generative Masked Text-to-Motion Model with Hybrid Vector Quantization</b>
            <br />
            <i><a href="https://link.springer.com/journal/11704" target="_blank" rel="noopener">Frontiers of Computer Science, FCS 2025, CCF B</a></i> 
            
            <br />
            <b><u>Jia-Qi Zhang</u></b>, Jia-Jun Wang, Fang-Lue Zhang, Miao Wang
            <br />
            
            
            
            
            
            <br />
            <u><b><i>Abstract</i></b>:</u> 
            Text-based motion generation enhances the flexibility of human motion design and editing, enabling applications in animation, virtual reality, and beyond. However, diffusion-based methods for text-to-motion generation often produce low-quality results. Conditional autoregressive approaches leveraging vector quantization variational autoencoders (VQ-VAE) struggle with vector 
            <span class="collapse" id="text_to_motion">
                quantization errors, requiring hierarchical or residual quantization. This increases the length of quantized token sequences, forcing the model to predict more tokens from text input, which complicates high-quality generation. To address this, we introduce HyT2M, an innovative text-to-motion model based on a hybrid VQ-VAE framework. Our approach decomposes motion into global and local components: local motion is quantized using a single vector quantization layer to preserve fine details, while global motion is reconstructed via residual vector quantization (RVQ) to compensate for errors caused by the limited perceptual range of local components. This hybrid strategy shortens token sequences while maintaining high reconstruction quality, easing the burden on the second-stage model. Furthermore, we develop a conditional masked transformer with a hybrid cross-guidance module, leveraging global motion tokens to enhance local motion predictions. This improves accuracy and usability for motion editing. Experiments on the HumanML3D, KIT-ML, and Motion-X datasets indicate that HyT2M achieves competitive results and excels in tasks such as motion completion and long-motion generation.
            </span> 
            <span> <a href="#text_to_motion" data-toggle="collapse" onclick="toggleText(this)" id="link-text_to_motion">... See More</a></span>
        </div>                       
    </div>
</div>

            
                <div class="project" data-filter="animation">
    <div class="row mb-4">
        <div class="col-sm-4">
            <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/shgs.jpg" />
        </div>
        <div class="col-sm-8">
            
            <b>Self-Supervised Humidity-Controllable Garment Simulation via Capillary Bridge Modeling</b>
            <br />
            <i><a href="https://pg2025.nccu.edu.tw/" target="_blank" rel="noopener">Pacific Conference on Computer Graphics and Applications, PG 2025, CCF B</a></i> 
            
            <br />
            Min Shi, Xin-Ran Wang, <b><u>Jia-Qi Zhang</u></b>, Lin Gao, Deng-Ming Zhu, Hong-Yan Zhang
            <br />
            
            
            
            
            
            <br />
            <u><b><i>Abstract</i></b>:</u> 
            Simulating wet clothing remains a significant challenge due to the complex physical interactions between moist fabric and the human body, compounded by the lack of dedicated datasets for training data-driven models. Existing self-supervised approaches struggle to capture moisture-induced dynamics such as skin adhesion, anisotropic surface resistance, and non-linear 
            <span class="collapse" id="garment_simulation">
                wrinkling, leading to limited accuracy and efficiency. To address this, we present SHGS, a novel self-supervised framework for humidity-controllable clothing simulation grounded in the physical modeling of capillary bridges that form between fabric and skin. We abstract the forces induced by wetness into two physically motivated components: a normal adhesive force derived from Laplace pressure and a tangential shear-resistance force that opposes relative motion along the fabric surface. By formulating these forces as potential energy for conservative effects and as mechanical work for non-conservative effects, we construct a physics-consistent wetness loss. This enables self-supervised training without requiring labeled data of wet clothing. Our humidity-sensitive dynamics are driven by a multi-layer graph neural network, which facilitates a smooth and physically realistic transition between different moisture levels. This architecture decouples the garment's dynamics in wet and dry states through a local weight interpolation mechanism, adjusting the fabric's behavior in response to varying humidity conditions. Experiments demonstrate that SHGS outperforms existing methods in both visual fidelity and computational efficiency, marking a significant advancement in realistic wet-cloth simulation.
            </span> 
            <span> <a href="#garment_simulation" data-toggle="collapse" onclick="toggleText(this)" id="link-garment_simulation">... See More</a></span>
        </div>                       
    </div>
</div>

            
                <div class="project" data-filter="animation">
    <div class="row mb-4">
        <div class="col-sm-4">
            <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/mr/smr.png" />
        </div>
        <div class="col-sm-8">
            
            <b>Skinned Motion Retargeting with Preservation of Body Part Relationships</b>
            <br />
            <i><a href="https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=2945" target="_blank" rel="noopener">IEEE Transactions on Visualization and Computer Graphics, TVCG 2024, <strong>CCF A</strong></a></i> 
            
            <br />
            <b><u>Jia-Qi Zhang</u></b>, Miao Wang, Fu-Cheng Zhang, Fang-Lue Zhang
            <br />
             <a href="https://ieeexplore.ieee.org/document/10586814" target="_blank" rel="noopener">[PDF]</a> 
            
            
            
            
            <br />
            <u><b><i>Abstract</i></b>:</u> 
            Motion retargeting is an active research area in computer graphics and animation, allowing for the transfer of motion from one character to another, thereby creating diverse animated character data. While this technology has numerous applications in animation, games, and movies, current methods often produce unnatural or semantically inconsistent motion when applied
            <span class="collapse" id="motion_retargeting">
                to characters with different shapes or joint counts. This is primarily due to a lack of consideration for the geometric and spatial relationships between the body parts of the source and target characters. To tackle this challenge, we introduce a novel spatially-preserving Skinned Motion Retargeting Network (SMRNet) capable of handling motion retargeting for characters with varying shapes and skeletal structures while maintaining semantic consistency. By learning a hybrid representation of the character's skeleton and shape in a rest pose, SMRNet transfers the rotation and root joint position of the source character's motion to the target character through embedded rest pose feature alignment. Additionally, it incorporates a differentiable loss function to further preserve the spatial consistency of body parts between the source and target. Comprehensive quantitative and qualitative evaluations demonstrate the superiority of our approach over existing alternatives, particularly in preserving spatial relationships more effectively.
            </span> 
            <span> <a href="#motion_retargeting" data-toggle="collapse" onclick="toggleText(this)" id="link-motion_retargeting">... See More</a></span>
        </div>                       
    </div>
</div>

            
                <div class="project" data-filter="lane detection">
    <div class="row mb-4">
        <div class="col-sm-4">
            <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/lane/houghlanenet.png" />
        </div>
        <div class="col-sm-8">
            
            <b>HoughLaneNet: Lane Detection with Deep Hough Transform and Dynamic Convolution</b>
            <br />
            <i><a href="https://www.sciencedirect.com/journal/computers-and-graphics" target="_blank" rel="noopener">Computers &amp; Graphics, 2023, <strong>Best Paper</strong>, CCF C</a></i> 
            
            <br />
            <b><u>Jia-Qi Zhang</u></b>, Hao-Bin Duan, Jun-Long Chen, Ariel Shamir, Miao Wang
            <br />
             <a href="https://arxiv.org/pdf/2307.03494.pdf" target="_blank" rel="noopener">[PDF]</a> 
             | <a href="https://github.com/Jiaqi-zhang/HoughLaneNet" target="_blank" rel="noopener">[CODE]</a> 
            
            
            
            <br />
            <u><b><i>Abstract</i></b>:</u> 
            The task of lane detection has garnered considerable attention in the field of autonomous driving due to its complexity. Lanes can present difficulties for detection, as they can be narrow, fragmented, and often obscured by heavy traffic. However, it has been observed that the lanes have a geometrical structure that resembles a straight line, leading to improved lane detection
            <span class="collapse" id="lane_detection">
                results when utilizing this characteristic. To address this challenge, we propose a hierarchical Deep Hough Transform (DHT) approach that combines all lane features in an image into the Hough parameter space. Additionally, we refine the point selection method and incorporate a Dynamic Convolution Module to effectively differentiate between lanes in the original image. Our network architecture comprises a backbone network, either a ResNet or Pyramid Vision Transformer, a Feature Pyramid Network as the neck to extract multi-scale features, and a hierarchical DHT-based feature aggregation head to accurately segment each lane. By utilizing the lane features in the Hough parameter space, the network learns dynamic convolution kernel parameters corresponding to each lane, allowing the Dynamic Convolution Module to effectively differentiate between lane features. Subsequently, the lane features are fed into the feature decoder, which predicts the final position of the lane. Our proposed network structure demonstrates improved performance in detecting heavily occluded or worn lane images, as evidenced by our extensive experimental results, which show that our method outperforms or is on par with state-of-the-art techniques.
            </span> 
            <span> <a href="#lane_detection" data-toggle="collapse" onclick="toggleText(this)" id="link-lane_detection">... See More</a></span>
        </div>                       
    </div>
</div>

            
                <div class="project" data-filter="colorization">
    <div class="row mb-4">
        <div class="col-sm-4">
            <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/videocolor/Colorization.png" />
        </div>
        <div class="col-sm-8">
            
            <b>Reference-Based Deep Line Art Video Colorization</b>
            <br />
            <i><a href="https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=2945" target="_blank" rel="noopener">IEEE Transactions on Visualization and Computer Graphics, TVCG 2022, <strong>CCF A</strong></a></i> 
            
            <br />
            Min Shi#, <b><u>Jia-Qi Zhang#</u></b>, Shu-Yu Chen, Lin Gao, Yu-Kun Lai, Fang-Lue Zhang
            <br />
             <a href="https://ieeexplore.ieee.org/document/9693178" target="_blank" rel="noopener">[PDF]</a> 
            
             | <a href="https://youtu.be/-1e7hRmXCEY" target="_blank" rel="noopener">[DEMO]</a> 
            
            
            <br />
            <u><b><i>Abstract</i></b>:</u> 
            Coloring line art images based on the colors of reference images is a crucial stage in animation production, which is time-consuming and tedious. This paper proposes a deep architecture to automatically color line art videos with the same color style as the given reference images. Our framework consists of a color transform network and a temporal refinement
            <span class="collapse" id="color_video">
                network based on 3U-net. The color transform network takes the target line art images as well as the line art and color images of the reference images as input and generates corresponding target color images. To cope with the large differences between each target line art image and the reference color images, we propose a distance attention layer that utilizes non-local similarity matching to determine the region correspondences between the target image and the reference images and transforms the local color information from the references to the target. To ensure global color style consistency, we further incorporate Adaptive Instance Normalization (AdaIN) with the transformation parameters obtained from a multiple-layer AdaIN that describes the global color style of the references extracted by an embedder network. The temporal refinement network learns spatiotemporal features through 3D convolutions to ensure the temporal color consistency of the results. Our model can achieve even better coloring results by fine-tuning the parameters with only a small number of samples when dealing with an animation of a new style. To evaluate our method, we build a line art coloring dataset. Experiments show that our method achieves the best performance on line art video coloring compared to the current state-of-the-art methods.
            </span> 
            <span> <a href="#color_video" data-toggle="collapse" onclick="toggleText(this)" id="link-color_video">... See More</a></span>
        </div>                       
    </div>
</div>

            
                <div class="project" data-filter="colorization">
    <div class="row mb-4">
        <div class="col-sm-4">
            <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/color_survey/survey.png" />
        </div>
        <div class="col-sm-8">
            
            <b>A review of image and video colorization: From analogies to deep learning</b>
            <br />
            <i><a href="https://www.sciencedirect.com/journal/visual-informatics" target="_blank" rel="noopener">Visual Informatics, VI 2022</a></i> 
            
            <br />
            Shu-Yu Chen, <b><u>Jia-Qi Zhang</u></b>, You-You Zhao, Paul L. Rosin, Yu-Kun Lai, Lin Gao
            <br />
             <a href="https://www.sciencedirect.com/science/article/pii/S2468502X22000389" target="_blank" rel="noopener">[PDF]</a> 
            
            
            
            
            <br />
            <u><b><i>Abstract</i></b>:</u> 
            Image colorization is a classic and important topic in computer graphics, where the aim is to add color to a monochromatic input image to produce a colorful result. In this survey, we present the history of colorization research in chronological order and summarize popular algorithms in this field. Early work on colorization mostly focused on developing techniques to improve the
            <span class="collapse" id="color_survey">
                colorization quality. In the last few years, researchers have considered more possibilities such as combining colorization with NLP (natural language processing) and focused more on industrial applications. To better control the color, various types of color control are designed, such as providing reference images or color-scribbles. We have created a taxonomy of the colorization methods according to the input type, divided into grayscale, sketch-based and hybrid. The pros and cons are discussed for each algorithm, and they are compared according to their main characteristics. Finally, we discuss how deep learning, and in particular Generative Adversarial Networks (GANs), has changed this field.
            </span> 
            <span> <a href="#color_survey" data-toggle="collapse" onclick="toggleText(this)" id="link-color_survey">... See More</a></span>
        </div>                       
    </div>
</div>

            
                <div class="project" data-filter="animation">
    <div class="row mb-4">
        <div class="col-sm-4">
            <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/text2anim/egteaser.png" />
        </div>
        <div class="col-sm-8">
            
            <b>Write-An-Animation: High-level Text-based Animation Editing with Character-Scene Interaction</b>
            <br />
            <i><a href="https://pg2022.org/" target="_blank" rel="noopener">Pacific Conference on Computer Graphics and Applications, PG 2021, CCF B</a></i> 
            
            <br />
            <b><u>Jia-Qi Zhang</u></b>, Xiang Xu, Shen Z.M, Huang Z.H, Yang Zhao, Cao Y.P, Wan P.F, Miao Wang
            <br />
             <a href="https://diglib.eg.org/bitstream/handle/10.1111/cgf14415/v40i7pp217-228.pdf" target="_blank" rel="noopener">[PDF]</a> 
            
             | <a href="https://youtu.be/3kgzasBL-UQ" target="_blank" rel="noopener">[DEMO]</a> 
            
            
            <br />
            <u><b><i>Abstract</i></b>:</u> 
            3D animation production for storytelling requires essential manual processes of virtual scene composition, character creation, and motion editing, etc. Although professional artists can favorably create 3D animations using software, it remains a complex and challenging task for novice users to handle and learn such tools for content creation. In this paper, we present
            <span class="collapse" id="anim_sence">
                Write-An-Animation, a 3D animation system that allows novice users to create, edit, preview, and render animations, all through text editing. Based on the input texts describing virtual scenes and human motions in natural languages, our system first parses the texts as semantic scene graphs, then retrieves 3D object models for virtual scene composition and motion clips for character animation. Character motion is synthesized with the combination of generative locomotions using neural state machine as well as template action motions retrieved from the dataset. Moreover, to make the virtual scene layout compatible with character motion, we propose an iterative scene layout and character motion optimization algorithm that jointly considers character-object collision and interaction. We demonstrate the effectiveness of our system with customized texts and public film scripts. Experimental results indicate that our system can generate satisfactory animations from texts.
            </span> 
            <span> <a href="#anim_sence" data-toggle="collapse" onclick="toggleText(this)" id="link-anim_sence">... See More</a></span>
        </div>                       
    </div>
</div>

            
                <div class="project" data-filter="colorization">
    <div class="row mb-4">
        <div class="col-sm-4">
            <!-- alt added: was missing entirely (accessibility / WCAG requirement) -->
            <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/active/color2020.png" alt="Teaser figure for Active Colorization for Cartoon Line Drawing" />
        </div>
        <div class="col-sm-8">
            
            <b>Active Colorization for Cartoon Line Drawing</b>
            <br />
            <i><a href="https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=2945" target="_blank" rel="noopener">IEEE Transactions on Visualization and Computer Graphics, TVCG 2020, <strong>CCF A</strong></a></i> 
            
            <br />
            Shu-Yu Chen#, <b><u>Jia-Qi Zhang#</u></b>, Lin Gao, Yue He, Shi-Hong Xia, Min Shi, Fang-Lue Zhang
            <br />
             <a href="https://ieeexplore.ieee.org/abstract/document/9143503" target="_blank" rel="noopener">[PDF]</a> 
            
             | <a href="https://youtu.be/OCkJhaoupcU" target="_blank" rel="noopener">[DEMO]</a> 
            
            
            <br />
            <u><b><i>Abstract</i></b>:</u> 
            In the animation industry, the colorization of raw sketch images is a vitally important but very time-consuming task. This article focuses on providing a novel solution that semiautomatically colorizes a set of images using a single colorized reference image. Our method is able to provide coherent colors for regions that have similar semantics to those in the reference image. An active learning
            <span class="collapse" id="color_sketch">
                based framework is used to match local regions, followed by mixed-integer quadratic programming (MIQP) which considers the spatial contexts to further refine the matching results. We efficiently utilize user interactions to achieve high accuracy in the final colorized images. Experiments show that our method outperforms the current state-of-the-art deep learning based colorization method in terms of color coherency with the reference image. The region matching framework could potentially be applied to other applications, such as color transfer.
            </span> 
            <!-- Collapse toggle follows the site-wide Bootstrap data-toggle + toggleText() pattern -->
            <span> <a href="#color_sketch" data-toggle="collapse" onclick="toggleText(this)" id="link-color_sketch">... See More</a></span>
        </div>                       
    </div>
</div>

            
        </div>
        <p></p>
    </div>
</div>

<!-- Outreach section -->

<hr />

<div class="row custom-offset" id="outreach">
    <div class="col">
        <!-- Fixed attribute-name typo "clss" -> "class" so the mb-5 margin utility actually applies -->
        <h2 class="mb-5">💻 Projects</h2>
        <p></p>
        
            <div class="row mb-4">
    <div class="col-sm-4">
        <!-- alt added: was missing entirely -->
        <img width="100%" height="auto" class="w-full rounded-lg" src="assets/img/vr_subway.gif" alt="Animation of the 3D subway cockpit reconstruction with a virtual driver avatar" />
    </div>
    <div class="col-sm-8">
         <b><i>Subway</i>:</b> 
        <b>Target Identification in Multi-View Videos and Virtual Avatar Generation</b>
        <br />
        <!-- NOTE(review): link text says "CRRC INSTITUTE 2022" but the href points to
             hackmit.org — this looks like a leftover from the web template; confirm the
             intended URL before publishing -->
        <i><a href="https://hackmit.org/" target="_blank" rel="noopener">CRRC INSTITUTE 2022 (Beijing, China.)</a></i>
        <br />
        <u>Jia-Qi Zhang</u>, Yi-Jun Li.
        <br />
        
        
        
        <br />
        <u><b><i>Description</i></b>:</u> Using data collected from three cameras, this project reconstructs the driver's movements within a subway cockpit in three dimensions. It includes the generation of a 3D environment, character positioning, navigation, interaction implementation and demonstration.
        <br />
        <u><b><i>Prize</i></b>:</u> Received a certification of practical application.
    </div>
</div>

        
        <p></p>
    </div>
</div>

<!-- Gallery section -->

<hr />

<div class="row custom-offset" id="gallery">
    <div class="col">
        <!-- Fixed attribute-name typo "clss" -> "class" so the mb-5 margin utility actually applies -->
        <h2 class="mb-5">📸 Gallery</h2>
        <p></p>
        
        <p></p>
        
            <div class="gallery">
    <img src="assets/memo/3dv.jpg" alt="won the grand prize in 3DV" width="800" height="600" />
    <div class="desc">won the grand prize in 3DV (2023)</div>
</div>

        
            <div class="gallery">
    <img src="assets/memo/cg_pic.jpg" alt="photo with my mentor and junior" width="800" height="600" />
    <div class="desc">photo with my mentor and junior (2023)</div>
</div>

        
            <div class="gallery">
    <img src="assets/memo/cg_best.jpg" alt="won the Best Paper Award for the first time" width="800" height="600" />
    <div class="desc">won the Best Paper Award for the first time (2023)</div>
</div>

        
        <p></p>
    </div>
</div>

<!-- Footer section -->
<!-- Removed a spacer <div> that contained only an invisible U+200E (left-to-right mark)
     character — spacing belongs in CSS utility classes, not invisible text nodes. -->

<hr />

<!-- Dropped the truncated class token "pt-md-" (invalid Bootstrap utility, had no effect) -->
<footer class="pt-2 my-md-2">
    <div class="row justify-content-center">
        <div class="col-7 col-md text-left align-self-center">
            <p class="h6">© Zhang Jia-Qi, <span id="currentYear"></span></p>
            <p class="h6">Contact: 37 Xueyuan Road, Haidian District, Beijing, P.R. China, 100191.</p>
            <a href="https://github.com/mkhangg/academic-website" target="_blank" rel="noopener"><b>&gt; Web template powered by mkhangg.</b></a>
        </div>
        <div class="col col-md text-right">
            <!-- Fixed invalid class "mr+4" -> Bootstrap spacing utility "mr-4" -->
            <img class="mr-4" src="assets/img/buaa_logo.png" data-canonical-src="assets/img/buaa_logo.png" alt="BUAA" title="BUAA" width="100" />
            
            <img class="mr-4" src="assets/img/ncepu_logo.png" data-canonical-src="assets/img/ncepu_logo.png" alt="NCEPU" title="NCEPU" width="100" />
            
        </div>
    </div>
    <p></p>
</footer>


        </div>

        <!-- Bootstrap core JS -->
        <script src="https://code.jquery.com/jquery-3.5.1.min.js"></script>
        <!-- bootstrap.bundle.min.js (4.6.0) already contains Popper and every Bootstrap
             plugin; the page previously ALSO loaded bootstrap@4.3.1/dist/js/bootstrap.min.js
             and a standalone popper.js 1.14.7, registering two conflicting Bootstrap
             versions on the same jQuery instance. Keep only the single 4.6.0 bundle. -->
        <script src="https://cdn.jsdelivr.net/npm/bootstrap@4.6.0/dist/js/bootstrap.bundle.min.js"></script>

        <!-- Isotope JS -->
        <script src="https://cdn.jsdelivr.net/npm/isotope-layout@3.0.2/dist/isotope.pkgd.min.js"></script>

        <!-- OwlCarousel2 JS -->
        <script src="https://cdnjs.cloudflare.com/ajax/libs/OwlCarousel2/2.3.4/owl.carousel.min.js"></script>

        <!-- Third party plugin JS -->
        <script src="https://cdnjs.cloudflare.com/ajax/libs/animejs/3.2.1/anime.min.js"></script>

        <!-- Core theme JS -->
        <script src="js/scripts.js"></script>

    </body>
</html>
